xref: /openbmc/qemu/target/hppa/translate.c (revision bc3da3cf)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
34 #undef  HELPER_H
35 
36 /* Choose to use explicit sizes within this file. */
37 #undef tcg_temp_new
38 
39 typedef struct DisasCond {
40     TCGCond c;
41     TCGv_i64 a0, a1;
42 } DisasCond;
43 
44 typedef struct DisasContext {
45     DisasContextBase base;
46     CPUState *cs;
47 
48     uint64_t iaoq_f;
49     uint64_t iaoq_b;
50     uint64_t iaoq_n;
51     TCGv_i64 iaoq_n_var;
52 
53     DisasCond null_cond;
54     TCGLabel *null_lab;
55 
56     TCGv_i64 zero;
57 
58     uint32_t insn;
59     uint32_t tb_flags;
60     int mmu_idx;
61     int privilege;
62     bool psw_n_nonzero;
63     bool is_pa20;
64 
65 #ifdef CONFIG_USER_ONLY
66     MemOp unalign;
67 #endif
68 } DisasContext;
69 
70 #ifdef CONFIG_USER_ONLY
71 #define UNALIGN(C)  (C)->unalign
72 #else
73 #define UNALIGN(C)  MO_ALIGN
74 #endif
75 
76 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
77 static int expand_sm_imm(DisasContext *ctx, int val)
78 {
79     if (val & PSW_SM_E) {
80         val = (val & ~PSW_SM_E) | PSW_E;
81     }
82     if (val & PSW_SM_W) {
83         val = (val & ~PSW_SM_W) | PSW_W;
84     }
85     return val;
86 }
87 
88 /* The space register field is stored inverted: 0 means sr0 explicitly, not a space inferred from the base register.  */
89 static int expand_sr3x(DisasContext *ctx, int val)
90 {
91     return ~val;
92 }
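/* space_select() below undoes this inversion with "if (sp < 0) sp = ~sp",
   so a 3-bit space specifier travels through decode as its complement.  */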
93 
94 /* Convert the M:A bits within a memory insn to the tri-state value
95    we use for the final M.  */
96 static int ma_to_m(DisasContext *ctx, int val)
97 {
98     return val & 2 ? (val & 1 ? -1 : 1) : 0;
99 }
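/* E.g., reading M:A as bits 1:0 of VAL: 0b10 -> +1 (post-modify),
   0b11 -> -1 (pre-modify), 0b0x -> 0 (no base register update).  */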
100 
101 /* Convert the sign of the displacement to a pre or post-modify.  */
102 static int pos_to_m(DisasContext *ctx, int val)
103 {
104     return val ? 1 : -1;
105 }
106 
107 static int neg_to_m(DisasContext *ctx, int val)
108 {
109     return val ? -1 : 1;
110 }
111 
112 /* Used for branch targets and fp memory ops.  */
113 static int expand_shl2(DisasContext *ctx, int val)
114 {
115     return val << 2;
116 }
117 
118 /* Used for fp memory ops.  */
119 static int expand_shl3(DisasContext *ctx, int val)
120 {
121     return val << 3;
122 }
123 
124 /* Used for assemble_21.  */
125 static int expand_shl11(DisasContext *ctx, int val)
126 {
127     return val << 11;
128 }
129 
130 static int assemble_6(DisasContext *ctx, int val)
131 {
132     /*
133      * Officially, 32 * x + 32 - y.
134      * Here, x is already in bit 5, and y is [4:0].
135  * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
136      * with the overflow from bit 4 summing with x.
137      */
138     return (val ^ 31) + 1;
139 }
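/* Worked example: x = 1, y = 3 arrives as val = 0b100011 = 35, and
   (35 ^ 31) + 1 = 61 == 32 * 1 + 32 - 3.  */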
140 
141 /* Translate CMPB/CMPIB doubleword conditions to standard. */
142 static int cmpbid_c(DisasContext *ctx, int val)
143 {
144     return val ? val : 4; /* 0 == "*<<" */
145 }
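/* E.g. a zero condition field decodes to 4, which do_sub_cond() below
   treats as TCG_COND_LTU, the unsigned "<<" comparison.  */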
146 
147 
148 /* Include the auto-generated decoder.  */
149 #include "decode-insns.c.inc"
150 
151 /* We are not using a goto_tb (for whatever reason), but have already
152    updated the iaq, so don't update it again on exit.  */
153 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
154 
155 /* We are exiting the TB, but have neither emitted a goto_tb, nor
156    updated the iaq for the next instruction to be executed.  */
157 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
158 
159 /* Similarly, but we want to return to the main loop immediately
160    to recognize unmasked interrupts.  */
161 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
162 #define DISAS_EXIT                  DISAS_TARGET_3
163 
164 /* global register indexes */
165 static TCGv_i64 cpu_gr[32];
166 static TCGv_i64 cpu_sr[4];
167 static TCGv_i64 cpu_srH;
168 static TCGv_i64 cpu_iaoq_f;
169 static TCGv_i64 cpu_iaoq_b;
170 static TCGv_i64 cpu_iasq_f;
171 static TCGv_i64 cpu_iasq_b;
172 static TCGv_i64 cpu_sar;
173 static TCGv_i64 cpu_psw_n;
174 static TCGv_i64 cpu_psw_v;
175 static TCGv_i64 cpu_psw_cb;
176 static TCGv_i64 cpu_psw_cb_msb;
177 
178 void hppa_translate_init(void)
179 {
180 #define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
181 
182     typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
183     static const GlobalVar vars[] = {
184         { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
185         DEF_VAR(psw_n),
186         DEF_VAR(psw_v),
187         DEF_VAR(psw_cb),
188         DEF_VAR(psw_cb_msb),
189         DEF_VAR(iaoq_f),
190         DEF_VAR(iaoq_b),
191     };
192 
193 #undef DEF_VAR
194 
195     /* Use the symbolic register names that match the disassembler.  */
196     static const char gr_names[32][4] = {
197         "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
198         "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
199         "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
200         "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
201     };
202     /* SR[4-7] are not TCG globals, so that load_spr() and space_select()
203        can index env->sr[] at runtime.  */
203     static const char sr_names[5][4] = {
204         "sr0", "sr1", "sr2", "sr3", "srH"
205     };
206 
207     int i;
208 
209     cpu_gr[0] = NULL;
210     for (i = 1; i < 32; i++) {
211         cpu_gr[i] = tcg_global_mem_new(tcg_env,
212                                        offsetof(CPUHPPAState, gr[i]),
213                                        gr_names[i]);
214     }
215     for (i = 0; i < 4; i++) {
216         cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
217                                            offsetof(CPUHPPAState, sr[i]),
218                                            sr_names[i]);
219     }
220     cpu_srH = tcg_global_mem_new_i64(tcg_env,
221                                      offsetof(CPUHPPAState, sr[4]),
222                                      sr_names[4]);
223 
224     for (i = 0; i < ARRAY_SIZE(vars); ++i) {
225         const GlobalVar *v = &vars[i];
226         *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
227     }
228 
229     cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
230                                         offsetof(CPUHPPAState, iasq_f),
231                                         "iasq_f");
232     cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
233                                         offsetof(CPUHPPAState, iasq_b),
234                                         "iasq_b");
235 }
236 
237 static DisasCond cond_make_f(void)
238 {
239     return (DisasCond){
240         .c = TCG_COND_NEVER,
241         .a0 = NULL,
242         .a1 = NULL,
243     };
244 }
245 
246 static DisasCond cond_make_t(void)
247 {
248     return (DisasCond){
249         .c = TCG_COND_ALWAYS,
250         .a0 = NULL,
251         .a1 = NULL,
252     };
253 }
254 
255 static DisasCond cond_make_n(void)
256 {
257     return (DisasCond){
258         .c = TCG_COND_NE,
259         .a0 = cpu_psw_n,
260         .a1 = tcg_constant_i64(0)
261     };
262 }
263 
264 static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
265 {
266     assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
267     return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
268 }
269 
270 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
271 {
272     return cond_make_tmp(c, a0, tcg_constant_i64(0));
273 }
274 
275 static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
276 {
277     TCGv_i64 tmp = tcg_temp_new_i64();
278     tcg_gen_mov_i64(tmp, a0);
279     return cond_make_0_tmp(c, tmp);
280 }
281 
282 static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
283 {
284     TCGv_i64 t0 = tcg_temp_new_i64();
285     TCGv_i64 t1 = tcg_temp_new_i64();
286 
287     tcg_gen_mov_i64(t0, a0);
288     tcg_gen_mov_i64(t1, a1);
289     return cond_make_tmp(c, t0, t1);
290 }
291 
292 static void cond_free(DisasCond *cond)
293 {
294     switch (cond->c) {
295     default:
296         cond->a0 = NULL;
297         cond->a1 = NULL;
298         /* fallthru */
299     case TCG_COND_ALWAYS:
300         cond->c = TCG_COND_NEVER;
301         break;
302     case TCG_COND_NEVER:
303         break;
304     }
305 }
306 
307 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
308 {
309     if (reg == 0) {
310         return ctx->zero;
311     } else {
312         return cpu_gr[reg];
313     }
314 }
315 
316 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
317 {
318     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
319         return tcg_temp_new_i64();
320     } else {
321         return cpu_gr[reg];
322     }
323 }
324 
325 static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
326 {
327     if (ctx->null_cond.c != TCG_COND_NEVER) {
328         tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
329                             ctx->null_cond.a1, dest, t);
330     } else {
331         tcg_gen_mov_i64(dest, t);
332     }
333 }
334 
335 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
336 {
337     if (reg != 0) {
338         save_or_nullify(ctx, cpu_gr[reg], t);
339     }
340 }
341 
342 #if HOST_BIG_ENDIAN
343 # define HI_OFS  0
344 # define LO_OFS  4
345 #else
346 # define HI_OFS  4
347 # define LO_OFS  0
348 #endif
349 
350 static TCGv_i32 load_frw_i32(unsigned rt)
351 {
352     TCGv_i32 ret = tcg_temp_new_i32();
353     tcg_gen_ld_i32(ret, tcg_env,
354                    offsetof(CPUHPPAState, fr[rt & 31])
355                    + (rt & 32 ? LO_OFS : HI_OFS));
356     return ret;
357 }
358 
359 static TCGv_i32 load_frw0_i32(unsigned rt)
360 {
361     if (rt == 0) {
362         TCGv_i32 ret = tcg_temp_new_i32();
363         tcg_gen_movi_i32(ret, 0);
364         return ret;
365     } else {
366         return load_frw_i32(rt);
367     }
368 }
369 
370 static TCGv_i64 load_frw0_i64(unsigned rt)
371 {
372     TCGv_i64 ret = tcg_temp_new_i64();
373     if (rt == 0) {
374         tcg_gen_movi_i64(ret, 0);
375     } else {
376         tcg_gen_ld32u_i64(ret, tcg_env,
377                           offsetof(CPUHPPAState, fr[rt & 31])
378                           + (rt & 32 ? LO_OFS : HI_OFS));
379     }
380     return ret;
381 }
382 
383 static void save_frw_i32(unsigned rt, TCGv_i32 val)
384 {
385     tcg_gen_st_i32(val, tcg_env,
386                    offsetof(CPUHPPAState, fr[rt & 31])
387                    + (rt & 32 ? LO_OFS : HI_OFS));
388 }
389 
390 #undef HI_OFS
391 #undef LO_OFS
392 
393 static TCGv_i64 load_frd(unsigned rt)
394 {
395     TCGv_i64 ret = tcg_temp_new_i64();
396     tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
397     return ret;
398 }
399 
400 static TCGv_i64 load_frd0(unsigned rt)
401 {
402     if (rt == 0) {
403         TCGv_i64 ret = tcg_temp_new_i64();
404         tcg_gen_movi_i64(ret, 0);
405         return ret;
406     } else {
407         return load_frd(rt);
408     }
409 }
410 
411 static void save_frd(unsigned rt, TCGv_i64 val)
412 {
413     tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
414 }
415 
416 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
417 {
418 #ifdef CONFIG_USER_ONLY
419     tcg_gen_movi_i64(dest, 0);
420 #else
421     if (reg < 4) {
422         tcg_gen_mov_i64(dest, cpu_sr[reg]);
423     } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
424         tcg_gen_mov_i64(dest, cpu_srH);
425     } else {
426         tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
427     }
428 #endif
429 }
430 
431 /* Skip over the implementation of an insn that has been nullified.
432    Use this when the insn is too complex for a conditional move.  */
433 static void nullify_over(DisasContext *ctx)
434 {
435     if (ctx->null_cond.c != TCG_COND_NEVER) {
436         /* The always condition should have been handled in the main loop.  */
437         assert(ctx->null_cond.c != TCG_COND_ALWAYS);
438 
439         ctx->null_lab = gen_new_label();
440 
441         /* If we're using PSW[N], copy it to a temp because... */
442         if (ctx->null_cond.a0 == cpu_psw_n) {
443             ctx->null_cond.a0 = tcg_temp_new_i64();
444             tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
445         }
446         /* ... we clear it before branching over the implementation,
447            so that (1) it's clear after nullifying this insn and
448            (2) if this insn nullifies the next, PSW[N] is valid.  */
449         if (ctx->psw_n_nonzero) {
450             ctx->psw_n_nonzero = false;
451             tcg_gen_movi_i64(cpu_psw_n, 0);
452         }
453 
454         tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
455                            ctx->null_cond.a1, ctx->null_lab);
456         cond_free(&ctx->null_cond);
457     }
458 }
459 
460 /* Save the current nullification state to PSW[N].  */
461 static void nullify_save(DisasContext *ctx)
462 {
463     if (ctx->null_cond.c == TCG_COND_NEVER) {
464         if (ctx->psw_n_nonzero) {
465             tcg_gen_movi_i64(cpu_psw_n, 0);
466         }
467         return;
468     }
469     if (ctx->null_cond.a0 != cpu_psw_n) {
470         tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
471                             ctx->null_cond.a0, ctx->null_cond.a1);
472         ctx->psw_n_nonzero = true;
473     }
474     cond_free(&ctx->null_cond);
475 }
476 
477 /* Set PSW[N] to X.  The intention is that this is used immediately
478    before a goto_tb/exit_tb, so that there is no fallthru path to other
479    code within the TB.  Therefore we do not update psw_n_nonzero.  */
480 static void nullify_set(DisasContext *ctx, bool x)
481 {
482     if (ctx->psw_n_nonzero || x) {
483         tcg_gen_movi_i64(cpu_psw_n, x);
484     }
485 }
486 
487 /* Mark the end of an instruction that may have been nullified.
488    This is the pair to nullify_over.  Always returns true so that
489    it may be tail-called from a translate function.  */
490 static bool nullify_end(DisasContext *ctx)
491 {
492     TCGLabel *null_lab = ctx->null_lab;
493     DisasJumpType status = ctx->base.is_jmp;
494 
495     /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
496        For UPDATED, we cannot update on the nullified path.  */
497     assert(status != DISAS_IAQ_N_UPDATED);
498 
499     if (likely(null_lab == NULL)) {
500         /* The current insn wasn't conditional or handled the condition
501            applied to it without a branch, so the (new) setting of
502            NULL_COND can be applied directly to the next insn.  */
503         return true;
504     }
505     ctx->null_lab = NULL;
506 
507     if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
508         /* The next instruction will be unconditional,
509            and NULL_COND already reflects that.  */
510         gen_set_label(null_lab);
511     } else {
512         /* The insn that we just executed is itself nullifying the next
513            instruction.  Store the condition in the PSW[N] global.
514            We asserted PSW[N] = 0 in nullify_over, so that after the
515            label we have the proper value in place.  */
516         nullify_save(ctx);
517         gen_set_label(null_lab);
518         ctx->null_cond = cond_make_n();
519     }
520     if (status == DISAS_NORETURN) {
521         ctx->base.is_jmp = DISAS_NEXT;
522     }
523     return true;
524 }
525 
526 static uint64_t gva_offset_mask(DisasContext *ctx)
527 {
528     return (ctx->tb_flags & PSW_W
529             ? MAKE_64BIT_MASK(0, 62)
530             : MAKE_64BIT_MASK(0, 32));
531 }
532 
533 static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
534                             uint64_t ival, TCGv_i64 vval)
535 {
536     uint64_t mask = gva_offset_mask(ctx);
537 
538     if (ival != -1) {
539         tcg_gen_movi_i64(dest, ival & mask);
540         return;
541     }
542     tcg_debug_assert(vval != NULL);
543 
544     /*
545      * We know that the IAOQ is already properly masked.
546      * This optimization is primarily for "iaoq_f = iaoq_b".
547      */
548     if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
549         tcg_gen_mov_i64(dest, vval);
550     } else {
551         tcg_gen_andi_i64(dest, vval, mask);
552     }
553 }
554 
555 static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
556 {
557     return ctx->iaoq_f + disp + 8;
558 }
559 
560 static void gen_excp_1(int exception)
561 {
562     gen_helper_excp(tcg_env, tcg_constant_i32(exception));
563 }
564 
565 static void gen_excp(DisasContext *ctx, int exception)
566 {
567     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
568     copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
569     nullify_save(ctx);
570     gen_excp_1(exception);
571     ctx->base.is_jmp = DISAS_NORETURN;
572 }
573 
574 static bool gen_excp_iir(DisasContext *ctx, int exc)
575 {
576     nullify_over(ctx);
577     tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
578                    tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
579     gen_excp(ctx, exc);
580     return nullify_end(ctx);
581 }
582 
583 static bool gen_illegal(DisasContext *ctx)
584 {
585     return gen_excp_iir(ctx, EXCP_ILL);
586 }
587 
588 #ifdef CONFIG_USER_ONLY
589 #define CHECK_MOST_PRIVILEGED(EXCP) \
590     return gen_excp_iir(ctx, EXCP)
591 #else
592 #define CHECK_MOST_PRIVILEGED(EXCP) \
593     do {                                     \
594         if (ctx->privilege != 0) {           \
595             return gen_excp_iir(ctx, EXCP);  \
596         }                                    \
597     } while (0)
598 #endif
599 
600 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
601 {
602     return translator_use_goto_tb(&ctx->base, dest);
603 }
604 
605 /* If the next insn is to be nullified, and it's on the same page,
606    and we're not attempting to set a breakpoint on it, then we can
607    totally skip the nullified insn.  This avoids creating and
608    executing a TB that merely branches to the next TB.  */
609 static bool use_nullify_skip(DisasContext *ctx)
610 {
611     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
612             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
613 }
614 
615 static void gen_goto_tb(DisasContext *ctx, int which,
616                         uint64_t f, uint64_t b)
617 {
618     if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
619         tcg_gen_goto_tb(which);
620         copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
621         copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
622         tcg_gen_exit_tb(ctx->base.tb, which);
623     } else {
624         copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
625         copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
626         tcg_gen_lookup_and_goto_ptr();
627     }
628 }
629 
630 static bool cond_need_sv(int c)
631 {
632     return c == 2 || c == 3 || c == 6;
633 }
634 
635 static bool cond_need_cb(int c)
636 {
637     return c == 4 || c == 5;
638 }
639 
640 /* Need extensions from TCGv_i32 to TCGv_i64. */
641 static bool cond_need_ext(DisasContext *ctx, bool d)
642 {
643     return !(ctx->is_pa20 && d);
644 }
645 
646 /*
647  * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
648  * the PA-RISC 1.1 Architecture Reference Manual for details.
649  */
650 
651 static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
652                          TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
653 {
654     DisasCond cond;
655     TCGv_i64 tmp;
656 
657     switch (cf >> 1) {
658     case 0: /* Never / TR    (0 / 1) */
659         cond = cond_make_f();
660         break;
661     case 1: /* = / <>        (Z / !Z) */
662         if (cond_need_ext(ctx, d)) {
663             tmp = tcg_temp_new_i64();
664             tcg_gen_ext32u_i64(tmp, res);
665             res = tmp;
666         }
667         cond = cond_make_0(TCG_COND_EQ, res);
668         break;
669     case 2: /* < / >=        (N ^ V / !(N ^ V) */
670         tmp = tcg_temp_new_i64();
671         tcg_gen_xor_i64(tmp, res, sv);
672         if (cond_need_ext(ctx, d)) {
673             tcg_gen_ext32s_i64(tmp, tmp);
674         }
675         cond = cond_make_0_tmp(TCG_COND_LT, tmp);
676         break;
677     case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
678         /*
679          * Simplify:
680          *   (N ^ V) | Z
681          *   ((res < 0) ^ (sv < 0)) | !res
682          *   ((res ^ sv) < 0) | !res
683          *   (~(res ^ sv) >= 0) | !res
684          *   !(~(res ^ sv) >> 31) | !res
685          *   !(~(res ^ sv) >> 31 & res)
686          */
687         tmp = tcg_temp_new_i64();
688         tcg_gen_eqv_i64(tmp, res, sv);
689         if (cond_need_ext(ctx, d)) {
690             tcg_gen_sextract_i64(tmp, tmp, 31, 1);
691             tcg_gen_and_i64(tmp, tmp, res);
692             tcg_gen_ext32u_i64(tmp, tmp);
693         } else {
694             tcg_gen_sari_i64(tmp, tmp, 63);
695             tcg_gen_and_i64(tmp, tmp, res);
696         }
697         cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
698         break;
699     case 4: /* NUV / UV      (!C / C) */
700         /* Only bit 0 of cb_msb is ever set. */
701         cond = cond_make_0(TCG_COND_EQ, cb_msb);
702         break;
703     case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
704         tmp = tcg_temp_new_i64();
705         tcg_gen_neg_i64(tmp, cb_msb);
706         tcg_gen_and_i64(tmp, tmp, res);
707         if (cond_need_ext(ctx, d)) {
708             tcg_gen_ext32u_i64(tmp, tmp);
709         }
710         cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
711         break;
712     case 6: /* SV / NSV      (V / !V) */
713         if (cond_need_ext(ctx, d)) {
714             tmp = tcg_temp_new_i64();
715             tcg_gen_ext32s_i64(tmp, sv);
716             sv = tmp;
717         }
718         cond = cond_make_0(TCG_COND_LT, sv);
719         break;
720     case 7: /* OD / EV */
721         tmp = tcg_temp_new_i64();
722         tcg_gen_andi_i64(tmp, res, 1);
723         cond = cond_make_0_tmp(TCG_COND_NE, tmp);
724         break;
725     default:
726         g_assert_not_reached();
727     }
728     if (cf & 1) {
729         cond.c = tcg_invert_cond(cond.c);
730     }
731 
732     return cond;
733 }
734 
735 /* Similar, but for the special case of subtraction without borrow, we
736    can use the inputs directly.  This can allow other computation to be
737    deleted as unused.  */
738 
739 static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
740                              TCGv_i64 res, TCGv_i64 in1,
741                              TCGv_i64 in2, TCGv_i64 sv)
742 {
743     TCGCond tc;
744     bool ext_uns;
745 
746     switch (cf >> 1) {
747     case 1: /* = / <> */
748         tc = TCG_COND_EQ;
749         ext_uns = true;
750         break;
751     case 2: /* < / >= */
752         tc = TCG_COND_LT;
753         ext_uns = false;
754         break;
755     case 3: /* <= / > */
756         tc = TCG_COND_LE;
757         ext_uns = false;
758         break;
759     case 4: /* << / >>= */
760         tc = TCG_COND_LTU;
761         ext_uns = true;
762         break;
763     case 5: /* <<= / >> */
764         tc = TCG_COND_LEU;
765         ext_uns = true;
766         break;
767     default:
768         return do_cond(ctx, cf, d, res, NULL, sv);
769     }
770 
771     if (cf & 1) {
772         tc = tcg_invert_cond(tc);
773     }
774     if (cond_need_ext(ctx, d)) {
775         TCGv_i64 t1 = tcg_temp_new_i64();
776         TCGv_i64 t2 = tcg_temp_new_i64();
777 
778         if (ext_uns) {
779             tcg_gen_ext32u_i64(t1, in1);
780             tcg_gen_ext32u_i64(t2, in2);
781         } else {
782             tcg_gen_ext32s_i64(t1, in1);
783             tcg_gen_ext32s_i64(t2, in2);
784         }
785         return cond_make_tmp(tc, t1, t2);
786     }
787     return cond_make(tc, in1, in2);
788 }
789 
790 /*
791  * Similar, but for logicals, where the carry and overflow bits are not
792  * computed, and use of them is undefined.
793  *
794  * Undefined or not, hardware does not trap.  It seems reasonable to
795  * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
796  * how cases c={2,3} are treated.
797  */
798 
799 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
800                              TCGv_i64 res)
801 {
802     TCGCond tc;
803     bool ext_uns;
804 
805     switch (cf) {
806     case 0:  /* never */
807     case 9:  /* undef, C */
808     case 11: /* undef, C & !Z */
809     case 12: /* undef, V */
810         return cond_make_f();
811 
812     case 1:  /* true */
813     case 8:  /* undef, !C */
814     case 10: /* undef, !C | Z */
815     case 13: /* undef, !V */
816         return cond_make_t();
817 
818     case 2:  /* == */
819         tc = TCG_COND_EQ;
820         ext_uns = true;
821         break;
822     case 3:  /* <> */
823         tc = TCG_COND_NE;
824         ext_uns = true;
825         break;
826     case 4:  /* < */
827         tc = TCG_COND_LT;
828         ext_uns = false;
829         break;
830     case 5:  /* >= */
831         tc = TCG_COND_GE;
832         ext_uns = false;
833         break;
834     case 6:  /* <= */
835         tc = TCG_COND_LE;
836         ext_uns = false;
837         break;
838     case 7:  /* > */
839         tc = TCG_COND_GT;
840         ext_uns = false;
841         break;
842 
843     case 14: /* OD */
844     case 15: /* EV */
845         return do_cond(ctx, cf, d, res, NULL, NULL);
846 
847     default:
848         g_assert_not_reached();
849     }
850 
851     if (cond_need_ext(ctx, d)) {
852         TCGv_i64 tmp = tcg_temp_new_i64();
853 
854         if (ext_uns) {
855             tcg_gen_ext32u_i64(tmp, res);
856         } else {
857             tcg_gen_ext32s_i64(tmp, res);
858         }
859         return cond_make_0_tmp(tc, tmp);
860     }
861     return cond_make_0(tc, res);
862 }
863 
864 /* Similar, but for shift/extract/deposit conditions.  */
865 
866 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
867                              TCGv_i64 res)
868 {
869     unsigned c, f;
870 
871     /* Convert the compressed condition codes to standard.
872        0-2 are the same as logicals (nv,=,<), while 3 is OD.
873        4-7 are the reverse of 0-3.  */
874     c = orig & 3;
875     if (c == 3) {
876         c = 7;
877     }
878     f = (orig & 4) / 4;
879 
880     return do_log_cond(ctx, c * 2 + f, d, res);
881 }
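/* E.g. orig = 6 yields c = 2, f = 1, hence do_log_cond(5): signed ">=".  */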
882 
883 /* Similar, but for unit conditions.  */
884 
885 static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
886                               TCGv_i64 in1, TCGv_i64 in2)
887 {
888     DisasCond cond;
889     TCGv_i64 tmp, cb = NULL;
890     uint64_t d_repl = d ? 0x0000000100000001ull : 1;
891 
892     if (cf & 8) {
893         /* Since we want to test lots of carry-out bits all at once, do not
894          * do our normal thing and compute carry-in of bit B+1 since that
895          * leaves us with carry bits spread across two words.
896          */
897         cb = tcg_temp_new_i64();
898         tmp = tcg_temp_new_i64();
899         tcg_gen_or_i64(cb, in1, in2);
900         tcg_gen_and_i64(tmp, in1, in2);
901         tcg_gen_andc_i64(cb, cb, res);
902         tcg_gen_or_i64(cb, cb, tmp);
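        /* Altogether cb = (in1 & in2) | ((in1 | in2) & ~res), the per-bit
           carry-out (majority) function, since res = in1 ^ in2 ^ cin.  */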
903     }
904 
905     switch (cf >> 1) {
906     case 0: /* never / TR */
907     case 1: /* undefined */
908     case 5: /* undefined */
909         cond = cond_make_f();
910         break;
911 
912     case 2: /* SBZ / NBZ */
913         /* See hasless(v,1) from
914          * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
915          */
916         tmp = tcg_temp_new_i64();
917         tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
918         tcg_gen_andc_i64(tmp, tmp, res);
919         tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
920         cond = cond_make_0(TCG_COND_NE, tmp);
921         break;
922 
923     case 3: /* SHZ / NHZ */
924         tmp = tcg_temp_new_i64();
925         tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
926         tcg_gen_andc_i64(tmp, tmp, res);
927         tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
928         cond = cond_make_0(TCG_COND_NE, tmp);
929         break;
930 
931     case 4: /* SDC / NDC */
932         tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
933         cond = cond_make_0(TCG_COND_NE, cb);
934         break;
935 
936     case 6: /* SBC / NBC */
937         tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
938         cond = cond_make_0(TCG_COND_NE, cb);
939         break;
940 
941     case 7: /* SHC / NHC */
942         tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
943         cond = cond_make_0(TCG_COND_NE, cb);
944         break;
945 
946     default:
947         g_assert_not_reached();
948     }
949     if (cf & 1) {
950         cond.c = tcg_invert_cond(cond.c);
951     }
952 
953     return cond;
954 }
955 
956 static TCGv_i64 get_carry(DisasContext *ctx, bool d,
957                           TCGv_i64 cb, TCGv_i64 cb_msb)
958 {
959     if (cond_need_ext(ctx, d)) {
960         TCGv_i64 t = tcg_temp_new_i64();
961         tcg_gen_extract_i64(t, cb, 32, 1);
962         return t;
963     }
964     return cb_msb;
965 }
966 
967 static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
968 {
969     return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
970 }
971 
972 /* Compute signed overflow for addition.  */
973 static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
974                           TCGv_i64 in1, TCGv_i64 in2)
975 {
976     TCGv_i64 sv = tcg_temp_new_i64();
977     TCGv_i64 tmp = tcg_temp_new_i64();
978 
979     tcg_gen_xor_i64(sv, res, in1);
980     tcg_gen_xor_i64(tmp, in1, in2);
981     tcg_gen_andc_i64(sv, sv, tmp);
982 
983     return sv;
984 }
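/* E.g., in 32-bit terms, 0x7fffffff + 1 = 0x80000000: res ^ in1 has the
   sign bit set while in1 ^ in2 does not, so SV is negative on overflow.  */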
985 
986 /* Compute signed overflow for subtraction.  */
987 static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
988                           TCGv_i64 in1, TCGv_i64 in2)
989 {
990     TCGv_i64 sv = tcg_temp_new_i64();
991     TCGv_i64 tmp = tcg_temp_new_i64();
992 
993     tcg_gen_xor_i64(sv, res, in1);
994     tcg_gen_xor_i64(tmp, in1, in2);
995     tcg_gen_and_i64(sv, sv, tmp);
996 
997     return sv;
998 }
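/* E.g., in 32-bit terms, 0x80000000 - 1 = 0x7fffffff: res ^ in1 and
   in1 ^ in2 both have the sign bit set, so SV is negative on overflow.  */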
999 
1000 static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1001                    TCGv_i64 in2, unsigned shift, bool is_l,
1002                    bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
1003 {
1004     TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
1005     unsigned c = cf >> 1;
1006     DisasCond cond;
1007 
1008     dest = tcg_temp_new_i64();
1009     cb = NULL;
1010     cb_msb = NULL;
1011     cb_cond = NULL;
1012 
1013     if (shift) {
1014         tmp = tcg_temp_new_i64();
1015         tcg_gen_shli_i64(tmp, in1, shift);
1016         in1 = tmp;
1017     }
1018 
1019     if (!is_l || cond_need_cb(c)) {
1020         cb_msb = tcg_temp_new_i64();
1021         cb = tcg_temp_new_i64();
1022 
1023         tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
1024         if (is_c) {
1025             tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
1026                              get_psw_carry(ctx, d), ctx->zero);
1027         }
1028         tcg_gen_xor_i64(cb, in1, in2);
1029         tcg_gen_xor_i64(cb, cb, dest);
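        /* cb = in1 ^ in2 ^ dest recovers the per-bit carry-in vector,
           since dest = in1 ^ in2 ^ cin at each bit; bit 32 is the carry
           out of the low word, which get_carry() extracts.  */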
1030         if (cond_need_cb(c)) {
1031             cb_cond = get_carry(ctx, d, cb, cb_msb);
1032         }
1033     } else {
1034         tcg_gen_add_i64(dest, in1, in2);
1035         if (is_c) {
1036             tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
1037         }
1038     }
1039 
1040     /* Compute signed overflow if required.  */
1041     sv = NULL;
1042     if (is_tsv || cond_need_sv(c)) {
1043         sv = do_add_sv(ctx, dest, in1, in2);
1044         if (is_tsv) {
1045             /* ??? Need to include overflow from shift.  */
1046             gen_helper_tsv(tcg_env, sv);
1047         }
1048     }
1049 
1050     /* Emit any conditional trap before any writeback.  */
1051     cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
1052     if (is_tc) {
1053         tmp = tcg_temp_new_i64();
1054         tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1055         gen_helper_tcond(tcg_env, tmp);
1056     }
1057 
1058     /* Write back the result.  */
1059     if (!is_l) {
1060         save_or_nullify(ctx, cpu_psw_cb, cb);
1061         save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1062     }
1063     save_gpr(ctx, rt, dest);
1064 
1065     /* Install the new nullification.  */
1066     cond_free(&ctx->null_cond);
1067     ctx->null_cond = cond;
1068 }
1069 
1070 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
1071                        bool is_l, bool is_tsv, bool is_tc, bool is_c)
1072 {
1073     TCGv_i64 tcg_r1, tcg_r2;
1074 
1075     if (a->cf) {
1076         nullify_over(ctx);
1077     }
1078     tcg_r1 = load_gpr(ctx, a->r1);
1079     tcg_r2 = load_gpr(ctx, a->r2);
1080     do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
1081            is_tsv, is_tc, is_c, a->cf, a->d);
1082     return nullify_end(ctx);
1083 }
1084 
1085 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1086                        bool is_tsv, bool is_tc)
1087 {
1088     TCGv_i64 tcg_im, tcg_r2;
1089 
1090     if (a->cf) {
1091         nullify_over(ctx);
1092     }
1093     tcg_im = tcg_constant_i64(a->i);
1094     tcg_r2 = load_gpr(ctx, a->r);
1095     /* All ADDI conditions are 32-bit. */
1096     do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
1097     return nullify_end(ctx);
1098 }
1099 
1100 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1101                    TCGv_i64 in2, bool is_tsv, bool is_b,
1102                    bool is_tc, unsigned cf, bool d)
1103 {
1104     TCGv_i64 dest, sv, cb, cb_msb, tmp;
1105     unsigned c = cf >> 1;
1106     DisasCond cond;
1107 
1108     dest = tcg_temp_new_i64();
1109     cb = tcg_temp_new_i64();
1110     cb_msb = tcg_temp_new_i64();
1111 
1112     if (is_b) {
1113         /* DEST,C = IN1 + ~IN2 + C.  */
1114         tcg_gen_not_i64(cb, in2);
1115         tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
1116                          get_psw_carry(ctx, d), ctx->zero);
1117         tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
1118         tcg_gen_xor_i64(cb, cb, in1);
1119         tcg_gen_xor_i64(cb, cb, dest);
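        /* As in do_add, cb = ~in2 ^ in1 ^ dest is the per-bit carry-in
           vector of the equivalent addition IN1 + ~IN2 + C.  */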
1120     } else {
1121         /*
1122          * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
1123          * operations by seeding the high word with 1 and subtracting.
1124          */
1125         TCGv_i64 one = tcg_constant_i64(1);
1126         tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
1127         tcg_gen_eqv_i64(cb, in1, in2);
1128         tcg_gen_xor_i64(cb, cb, dest);
1129     }
1130 
1131     /* Compute signed overflow if required.  */
1132     sv = NULL;
1133     if (is_tsv || cond_need_sv(c)) {
1134         sv = do_sub_sv(ctx, dest, in1, in2);
1135         if (is_tsv) {
1136             gen_helper_tsv(tcg_env, sv);
1137         }
1138     }
1139 
1140     /* Compute the condition.  do_sub_cond's input-based special case
1141        cannot be used with borrow.  */
1141     if (!is_b) {
1142         cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1143     } else {
1144         cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
1145     }
1146 
1147     /* Emit any conditional trap before any writeback.  */
1148     if (is_tc) {
1149         tmp = tcg_temp_new_i64();
1150         tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1151         gen_helper_tcond(tcg_env, tmp);
1152     }
1153 
1154     /* Write back the result.  */
1155     save_or_nullify(ctx, cpu_psw_cb, cb);
1156     save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1157     save_gpr(ctx, rt, dest);
1158 
1159     /* Install the new nullification.  */
1160     cond_free(&ctx->null_cond);
1161     ctx->null_cond = cond;
1162 }
1163 
1164 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1165                        bool is_tsv, bool is_b, bool is_tc)
1166 {
1167     TCGv_i64 tcg_r1, tcg_r2;
1168 
1169     if (a->cf) {
1170         nullify_over(ctx);
1171     }
1172     tcg_r1 = load_gpr(ctx, a->r1);
1173     tcg_r2 = load_gpr(ctx, a->r2);
1174     do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
1175     return nullify_end(ctx);
1176 }
1177 
1178 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1179 {
1180     TCGv_i64 tcg_im, tcg_r2;
1181 
1182     if (a->cf) {
1183         nullify_over(ctx);
1184     }
1185     tcg_im = tcg_constant_i64(a->i);
1186     tcg_r2 = load_gpr(ctx, a->r);
1187     /* All SUBI conditions are 32-bit. */
1188     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
1189     return nullify_end(ctx);
1190 }
1191 
1192 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1193                       TCGv_i64 in2, unsigned cf, bool d)
1194 {
1195     TCGv_i64 dest, sv;
1196     DisasCond cond;
1197 
1198     dest = tcg_temp_new_i64();
1199     tcg_gen_sub_i64(dest, in1, in2);
1200 
1201     /* Compute signed overflow if required.  */
1202     sv = NULL;
1203     if (cond_need_sv(cf >> 1)) {
1204         sv = do_sub_sv(ctx, dest, in1, in2);
1205     }
1206 
1207     /* Form the condition for the compare.  */
1208     cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
1209 
1210     /* Clear.  */
1211     tcg_gen_movi_i64(dest, 0);
1212     save_gpr(ctx, rt, dest);
1213 
1214     /* Install the new nullification.  */
1215     cond_free(&ctx->null_cond);
1216     ctx->null_cond = cond;
1217 }
1218 
1219 static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1220                    TCGv_i64 in2, unsigned cf, bool d,
1221                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1222 {
1223     TCGv_i64 dest = dest_gpr(ctx, rt);
1224 
1225     /* Perform the operation, and writeback.  */
1226     fn(dest, in1, in2);
1227     save_gpr(ctx, rt, dest);
1228 
1229     /* Install the new nullification.  */
1230     cond_free(&ctx->null_cond);
1231     if (cf) {
1232         ctx->null_cond = do_log_cond(ctx, cf, d, dest);
1233     }
1234 }
1235 
1236 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1237                        void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1238 {
1239     TCGv_i64 tcg_r1, tcg_r2;
1240 
1241     if (a->cf) {
1242         nullify_over(ctx);
1243     }
1244     tcg_r1 = load_gpr(ctx, a->r1);
1245     tcg_r2 = load_gpr(ctx, a->r2);
1246     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1247     return nullify_end(ctx);
1248 }
1249 
1250 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
1251                     TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
1252                     void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1253 {
1254     TCGv_i64 dest;
1255     DisasCond cond;
1256 
1257     if (cf == 0) {
1258         dest = dest_gpr(ctx, rt);
1259         fn(dest, in1, in2);
1260         save_gpr(ctx, rt, dest);
1261         cond_free(&ctx->null_cond);
1262     } else {
1263         dest = tcg_temp_new_i64();
1264         fn(dest, in1, in2);
1265 
1266         cond = do_unit_cond(cf, d, dest, in1, in2);
1267 
1268         if (is_tc) {
1269             TCGv_i64 tmp = tcg_temp_new_i64();
1270             tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
1271             gen_helper_tcond(tcg_env, tmp);
1272         }
1273         save_gpr(ctx, rt, dest);
1274 
1275         cond_free(&ctx->null_cond);
1276         ctx->null_cond = cond;
1277     }
1278 }
1279 
1280 #ifndef CONFIG_USER_ONLY
1281 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1282    from the top 2 bits of the base register.  There are a few system
1283    instructions that have a 3-bit space specifier, for which SR0 is
1284    not special.  To handle this, pass ~SP.  */
1285 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
1286 {
1287     TCGv_ptr ptr;
1288     TCGv_i64 tmp;
1289     TCGv_i64 spc;
1290 
1291     if (sp != 0) {
1292         if (sp < 0) {
1293             sp = ~sp;
1294         }
1295         spc = tcg_temp_new_i64();
1296         load_spr(ctx, spc, sp);
1297         return spc;
1298     }
1299     if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1300         return cpu_srH;
1301     }
1302 
1303     ptr = tcg_temp_new_ptr();
1304     tmp = tcg_temp_new_i64();
1305     spc = tcg_temp_new_i64();
1306 
1307     /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1308     tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
1309     tcg_gen_andi_i64(tmp, tmp, 030);
1310     tcg_gen_trunc_i64_ptr(ptr, tmp);
1311 
1312     tcg_gen_add_ptr(ptr, ptr, tcg_env);
1313     tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
1314 
1315     return spc;
1316 }
1317 #endif
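/* Illustrative example: with PSW_W clear, a base of 0xc0001000 has top two
   bits 0b11, so space_select() loads env->sr[4 + 3], i.e. sr7.  */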
1318 
1319 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1320                      unsigned rb, unsigned rx, int scale, int64_t disp,
1321                      unsigned sp, int modify, bool is_phys)
1322 {
1323     TCGv_i64 base = load_gpr(ctx, rb);
1324     TCGv_i64 ofs;
1325     TCGv_i64 addr;
1326 
1327     /* Note that RX is mutually exclusive with DISP.  */
1328     if (rx) {
1329         ofs = tcg_temp_new_i64();
1330         tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1331         tcg_gen_add_i64(ofs, ofs, base);
1332     } else if (disp || modify) {
1333         ofs = tcg_temp_new_i64();
1334         tcg_gen_addi_i64(ofs, base, disp);
1335     } else {
1336         ofs = base;
1337     }
1338 
1339     *pofs = ofs;
1340     *pgva = addr = tcg_temp_new_i64();
1341     tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
1342 #ifndef CONFIG_USER_ONLY
1343     if (!is_phys) {
1344         tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1345     }
1346 #endif
1347 }
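/* Note the asymmetry above: pre-modify (modify < 0) and no-update (0)
   address memory through the displaced OFS, while post-modify (> 0)
   uses the original BASE.  */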
1348 
1349 /* Emit a memory load.  The modify parameter should be
1350  * < 0 for pre-modify,
1351  * > 0 for post-modify,
1352  * = 0 for no base register update.
1353  */
1354 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1355                        unsigned rx, int scale, int64_t disp,
1356                        unsigned sp, int modify, MemOp mop)
1357 {
1358     TCGv_i64 ofs;
1359     TCGv_i64 addr;
1360 
1361     /* Caller uses nullify_over/nullify_end.  */
1362     assert(ctx->null_cond.c == TCG_COND_NEVER);
1363 
1364     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1365              ctx->mmu_idx == MMU_PHYS_IDX);
1366     tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1367     if (modify) {
1368         save_gpr(ctx, rb, ofs);
1369     }
1370 }
1371 
1372 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1373                        unsigned rx, int scale, int64_t disp,
1374                        unsigned sp, int modify, MemOp mop)
1375 {
1376     TCGv_i64 ofs;
1377     TCGv_i64 addr;
1378 
1379     /* Caller uses nullify_over/nullify_end.  */
1380     assert(ctx->null_cond.c == TCG_COND_NEVER);
1381 
1382     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1383              ctx->mmu_idx == MMU_PHYS_IDX);
1384     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1385     if (modify) {
1386         save_gpr(ctx, rb, ofs);
1387     }
1388 }
1389 
1390 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1391                         unsigned rx, int scale, int64_t disp,
1392                         unsigned sp, int modify, MemOp mop)
1393 {
1394     TCGv_i64 ofs;
1395     TCGv_i64 addr;
1396 
1397     /* Caller uses nullify_over/nullify_end.  */
1398     assert(ctx->null_cond.c == TCG_COND_NEVER);
1399 
1400     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1401              ctx->mmu_idx == MMU_PHYS_IDX);
1402     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1403     if (modify) {
1404         save_gpr(ctx, rb, ofs);
1405     }
1406 }
1407 
1408 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1409                         unsigned rx, int scale, int64_t disp,
1410                         unsigned sp, int modify, MemOp mop)
1411 {
1412     TCGv_i64 ofs;
1413     TCGv_i64 addr;
1414 
1415     /* Caller uses nullify_over/nullify_end.  */
1416     assert(ctx->null_cond.c == TCG_COND_NEVER);
1417 
1418     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1419              ctx->mmu_idx == MMU_PHYS_IDX);
1420     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1421     if (modify) {
1422         save_gpr(ctx, rb, ofs);
1423     }
1424 }
1425 
1426 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1427                     unsigned rx, int scale, int64_t disp,
1428                     unsigned sp, int modify, MemOp mop)
1429 {
1430     TCGv_i64 dest;
1431 
1432     nullify_over(ctx);
1433 
1434     if (modify == 0) {
1435         /* No base register update.  */
1436         dest = dest_gpr(ctx, rt);
1437     } else {
1438         /* Make sure if RT == RB, we see the result of the load.  */
1439         dest = tcg_temp_new_i64();
1440     }
1441     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1442     save_gpr(ctx, rt, dest);
1443 
1444     return nullify_end(ctx);
1445 }
1446 
1447 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1448                       unsigned rx, int scale, int64_t disp,
1449                       unsigned sp, int modify)
1450 {
1451     TCGv_i32 tmp;
1452 
1453     nullify_over(ctx);
1454 
1455     tmp = tcg_temp_new_i32();
1456     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1457     save_frw_i32(rt, tmp);
1458 
1459     if (rt == 0) {
1460         gen_helper_loaded_fr0(tcg_env);
1461     }
1462 
1463     return nullify_end(ctx);
1464 }
1465 
1466 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1467 {
1468     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1469                      a->disp, a->sp, a->m);
1470 }
1471 
1472 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1473                       unsigned rx, int scale, int64_t disp,
1474                       unsigned sp, int modify)
1475 {
1476     TCGv_i64 tmp;
1477 
1478     nullify_over(ctx);
1479 
1480     tmp = tcg_temp_new_i64();
1481     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1482     save_frd(rt, tmp);
1483 
1484     if (rt == 0) {
1485         gen_helper_loaded_fr0(tcg_env);
1486     }
1487 
1488     return nullify_end(ctx);
1489 }
1490 
1491 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1492 {
1493     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1494                      a->disp, a->sp, a->m);
1495 }
1496 
1497 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1498                      int64_t disp, unsigned sp,
1499                      int modify, MemOp mop)
1500 {
1501     nullify_over(ctx);
1502     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1503     return nullify_end(ctx);
1504 }
1505 
1506 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1507                        unsigned rx, int scale, int64_t disp,
1508                        unsigned sp, int modify)
1509 {
1510     TCGv_i32 tmp;
1511 
1512     nullify_over(ctx);
1513 
1514     tmp = load_frw_i32(rt);
1515     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1516 
1517     return nullify_end(ctx);
1518 }
1519 
1520 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1521 {
1522     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1523                       a->disp, a->sp, a->m);
1524 }
1525 
1526 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1527                        unsigned rx, int scale, int64_t disp,
1528                        unsigned sp, int modify)
1529 {
1530     TCGv_i64 tmp;
1531 
1532     nullify_over(ctx);
1533 
1534     tmp = load_frd(rt);
1535     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1536 
1537     return nullify_end(ctx);
1538 }
1539 
1540 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1541 {
1542     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1543                       a->disp, a->sp, a->m);
1544 }
1545 
1546 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1547                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1548 {
1549     TCGv_i32 tmp;
1550 
1551     nullify_over(ctx);
1552     tmp = load_frw0_i32(ra);
1553 
1554     func(tmp, tcg_env, tmp);
1555 
1556     save_frw_i32(rt, tmp);
1557     return nullify_end(ctx);
1558 }
1559 
1560 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1561                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1562 {
1563     TCGv_i32 dst;
1564     TCGv_i64 src;
1565 
1566     nullify_over(ctx);
1567     src = load_frd(ra);
1568     dst = tcg_temp_new_i32();
1569 
1570     func(dst, tcg_env, src);
1571 
1572     save_frw_i32(rt, dst);
1573     return nullify_end(ctx);
1574 }
1575 
1576 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1577                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1578 {
1579     TCGv_i64 tmp;
1580 
1581     nullify_over(ctx);
1582     tmp = load_frd0(ra);
1583 
1584     func(tmp, tcg_env, tmp);
1585 
1586     save_frd(rt, tmp);
1587     return nullify_end(ctx);
1588 }
1589 
1590 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1591                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1592 {
1593     TCGv_i32 src;
1594     TCGv_i64 dst;
1595 
1596     nullify_over(ctx);
1597     src = load_frw0_i32(ra);
1598     dst = tcg_temp_new_i64();
1599 
1600     func(dst, tcg_env, src);
1601 
1602     save_frd(rt, dst);
1603     return nullify_end(ctx);
1604 }
1605 
1606 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1607                         unsigned ra, unsigned rb,
1608                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1609 {
1610     TCGv_i32 a, b;
1611 
1612     nullify_over(ctx);
1613     a = load_frw0_i32(ra);
1614     b = load_frw0_i32(rb);
1615 
1616     func(a, tcg_env, a, b);
1617 
1618     save_frw_i32(rt, a);
1619     return nullify_end(ctx);
1620 }
1621 
1622 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1623                         unsigned ra, unsigned rb,
1624                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1625 {
1626     TCGv_i64 a, b;
1627 
1628     nullify_over(ctx);
1629     a = load_frd0(ra);
1630     b = load_frd0(rb);
1631 
1632     func(a, tcg_env, a, b);
1633 
1634     save_frd(rt, a);
1635     return nullify_end(ctx);
1636 }
1637 
1638 /* Emit an unconditional branch to a direct target, which may or may not
1639    have already had nullification handled.  */
1640 static bool do_dbranch(DisasContext *ctx, uint64_t dest,
1641                        unsigned link, bool is_n)
1642 {
1643     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1644         if (link != 0) {
1645             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1646         }
1647         ctx->iaoq_n = dest;
1648         if (is_n) {
1649             ctx->null_cond.c = TCG_COND_ALWAYS;
1650         }
1651     } else {
1652         nullify_over(ctx);
1653 
1654         if (link != 0) {
1655             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1656         }
1657 
1658         if (is_n && use_nullify_skip(ctx)) {
1659             nullify_set(ctx, 0);
1660             gen_goto_tb(ctx, 0, dest, dest + 4);
1661         } else {
1662             nullify_set(ctx, is_n);
1663             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1664         }
1665 
1666         nullify_end(ctx);
1667 
1668         nullify_set(ctx, 0);
1669         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1670         ctx->base.is_jmp = DISAS_NORETURN;
1671     }
1672     return true;
1673 }
1674 
1675 /* Emit a conditional branch to a direct target.  If the branch itself
1676    is nullified, we should have already used nullify_over.  */
1677 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1678                        DisasCond *cond)
1679 {
1680     uint64_t dest = iaoq_dest(ctx, disp);
1681     TCGLabel *taken = NULL;
1682     TCGCond c = cond->c;
1683     bool n;
1684 
1685     assert(ctx->null_cond.c == TCG_COND_NEVER);
1686 
1687     /* Handle TRUE and NEVER as direct branches.  */
1688     if (c == TCG_COND_ALWAYS) {
1689         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1690     }
1691     if (c == TCG_COND_NEVER) {
1692         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1693     }
1694 
1695     taken = gen_new_label();
1696     tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1697     cond_free(cond);
1698 
1699     /* Not taken: Condition not satisfied; nullify on backward branches. */
1700     n = is_n && disp < 0;
1701     if (n && use_nullify_skip(ctx)) {
1702         nullify_set(ctx, 0);
1703         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1704     } else {
1705         if (!n && ctx->null_lab) {
1706             gen_set_label(ctx->null_lab);
1707             ctx->null_lab = NULL;
1708         }
1709         nullify_set(ctx, n);
1710         if (ctx->iaoq_n == -1) {
1711             /* The temporary iaoq_n_var died at the branch above.
1712                Regenerate it here instead of saving it.  */
1713             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1714         }
1715         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1716     }
1717 
1718     gen_set_label(taken);
1719 
1720     /* Taken: Condition satisfied; nullify on forward branches.  */
1721     n = is_n && disp >= 0;
1722     if (n && use_nullify_skip(ctx)) {
1723         nullify_set(ctx, 0);
1724         gen_goto_tb(ctx, 1, dest, dest + 4);
1725     } else {
1726         nullify_set(ctx, n);
1727         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1728     }
1729 
1730     /* Not taken: the branch itself was nullified.  */
1731     if (ctx->null_lab) {
1732         gen_set_label(ctx->null_lab);
1733         ctx->null_lab = NULL;
1734         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1735     } else {
1736         ctx->base.is_jmp = DISAS_NORETURN;
1737     }
1738     return true;
1739 }
1740 
1741 /* Emit an unconditional branch to an indirect target.  This handles
1742    nullification of the branch itself.  */
1743 static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
1744                        unsigned link, bool is_n)
1745 {
1746     TCGv_i64 a0, a1, next, tmp;
1747     TCGCond c;
1748 
1749     assert(ctx->null_lab == NULL);
1750 
1751     if (ctx->null_cond.c == TCG_COND_NEVER) {
1752         if (link != 0) {
1753             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1754         }
1755         next = tcg_temp_new_i64();
1756         tcg_gen_mov_i64(next, dest);
1757         if (is_n) {
1758             if (use_nullify_skip(ctx)) {
1759                 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1760                 tcg_gen_addi_i64(next, next, 4);
1761                 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1762                 nullify_set(ctx, 0);
1763                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1764                 return true;
1765             }
1766             ctx->null_cond.c = TCG_COND_ALWAYS;
1767         }
1768         ctx->iaoq_n = -1;
1769         ctx->iaoq_n_var = next;
1770     } else if (is_n && use_nullify_skip(ctx)) {
1771         /* The (conditional) branch, B, nullifies the next insn, N,
1772            and we're allowed to skip execution N (no single-step or
1773            tracepoint in effect).  Since the goto_ptr that we must use
1774            for the indirect branch consumes no special resources, we
1775            can (conditionally) skip B and continue execution.  */
1776         /* The use_nullify_skip test implies we have a known control path.  */
1777         tcg_debug_assert(ctx->iaoq_b != -1);
1778         tcg_debug_assert(ctx->iaoq_n != -1);
1779 
1780         /* We do have to handle the non-local temporary, DEST, before
1781        branching.  Since IAOQ_F is not really live at this point, we
1782            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1783         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1784         next = tcg_temp_new_i64();
1785         tcg_gen_addi_i64(next, dest, 4);
1786         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1787 
1788         nullify_over(ctx);
1789         if (link != 0) {
1790             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1791         }
1792         tcg_gen_lookup_and_goto_ptr();
1793         return nullify_end(ctx);
1794     } else {
1795         c = ctx->null_cond.c;
1796         a0 = ctx->null_cond.a0;
1797         a1 = ctx->null_cond.a1;
1798 
1799         tmp = tcg_temp_new_i64();
1800         next = tcg_temp_new_i64();
1801 
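             /*
              * NULL_COND is the condition under which this branch insn is
              * itself nullified.  The movcond below resolves NEXT to the
              * sequential IAOQ_N when the branch is nullified and to DEST
              * otherwise; likewise the link register is only written when
              * the branch actually executes.
              */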
1802         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1803         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1804         ctx->iaoq_n = -1;
1805         ctx->iaoq_n_var = next;
1806 
1807         if (link != 0) {
1808             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1809         }
1810 
1811         if (is_n) {
1812             /* The branch nullifies the next insn, which means the state of N
1813                after the branch is the inverse of the state of N that applied
1814                to the branch.  */
1815             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1816             cond_free(&ctx->null_cond);
1817             ctx->null_cond = cond_make_n();
1818             ctx->psw_n_nonzero = true;
1819         } else {
1820             cond_free(&ctx->null_cond);
1821         }
1822     }
1823     return true;
1824 }
1825 
1826 /* Implement
1827  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1828  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1829  *    else
1830  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1831  * which keeps the privilege level from being increased.
1832  */
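     /*
      * Worked example, with the privilege level in the low 2 bits of the
      * offset (0 most, 3 least privileged): at ctx->privilege == 2, an
      * offset with priv bits 1 gives dest = (offset & -4) | 2 > offset,
      * so the GTU movcond selects dest and the branch runs at level 2;
      * an offset with priv bits 3 exceeds dest, so offset is selected
      * and the branch runs at level 3.  Privilege is never raised.
      */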
1833 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1834 {
1835     TCGv_i64 dest;
1836     switch (ctx->privilege) {
1837     case 0:
1838         /* Privilege 0 is maximum and is allowed to decrease.  */
1839         return offset;
1840     case 3:
1841         /* Privilege 3 is minimum and is never allowed to increase.  */
1842         dest = tcg_temp_new_i64();
1843         tcg_gen_ori_i64(dest, offset, 3);
1844         break;
1845     default:
1846         dest = tcg_temp_new_i64();
1847         tcg_gen_andi_i64(dest, offset, -4);
1848         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1849         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1850         break;
1851     }
1852     return dest;
1853 }
1854 
1855 #ifdef CONFIG_USER_ONLY
1856 /* On Linux, page zero is normally marked execute only + gateway.
1857    Therefore normal read or write is supposed to fail, but specific
1858    offsets have kernel code mapped to raise permissions to implement
1859    system calls.  Handling this via an explicit check here, rather
1860    than in the "be disp(sr2,r0)" instruction that probably sent us
1861    here, is the easiest way to handle the branch delay slot on the
1862    aforementioned BE.  */
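     /* For example (assuming the usual hppa-linux ABI), a libc syscall
        stub enters this page with something like "ble 0x100(%sr2, %r0)",
        so IAOQ_F lands on the SYSCALL offset handled below.  */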
1863 static void do_page_zero(DisasContext *ctx)
1864 {
1865     TCGv_i64 tmp;
1866 
1867     /* If by some means we get here with PSW[N]=1, that implies that
1868        the B,GATE instruction would be skipped, and we'd fault on the
1869        next insn within the privileged page.  */
1870     switch (ctx->null_cond.c) {
1871     case TCG_COND_NEVER:
1872         break;
1873     case TCG_COND_ALWAYS:
1874         tcg_gen_movi_i64(cpu_psw_n, 0);
1875         goto do_sigill;
1876     default:
1877         /* Since this is always the first (and only) insn within the
1878            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1879         g_assert_not_reached();
1880     }
1881 
1882     /* Check that we didn't arrive here via some means that allowed
1883        non-sequential instruction execution.  Normally the PSW[B] bit
1884    detects this by preventing the B,GATE instruction from executing
1885        under such conditions.  */
1886     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1887         goto do_sigill;
1888     }
1889 
1890     switch (ctx->iaoq_f & -4) {
1891     case 0x00: /* Null pointer call */
1892         gen_excp_1(EXCP_IMP);
1893         ctx->base.is_jmp = DISAS_NORETURN;
1894         break;
1895 
1896     case 0xb0: /* LWS */
1897         gen_excp_1(EXCP_SYSCALL_LWS);
1898         ctx->base.is_jmp = DISAS_NORETURN;
1899         break;
1900 
1901     case 0xe0: /* SET_THREAD_POINTER */
1902         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1903         tmp = tcg_temp_new_i64();
1904         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1905         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1906         tcg_gen_addi_i64(tmp, tmp, 4);
1907         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1908         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1909         break;
1910 
1911     case 0x100: /* SYSCALL */
1912         gen_excp_1(EXCP_SYSCALL);
1913         ctx->base.is_jmp = DISAS_NORETURN;
1914         break;
1915 
1916     default:
1917     do_sigill:
1918         gen_excp_1(EXCP_ILL);
1919         ctx->base.is_jmp = DISAS_NORETURN;
1920         break;
1921     }
1922 }
1923 #endif
1924 
1925 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1926 {
1927     cond_free(&ctx->null_cond);
1928     return true;
1929 }
1930 
1931 static bool trans_break(DisasContext *ctx, arg_break *a)
1932 {
1933     return gen_excp_iir(ctx, EXCP_BREAK);
1934 }
1935 
1936 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1937 {
1938     /* No point in nullifying the memory barrier.  */
1939     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1940 
1941     cond_free(&ctx->null_cond);
1942     return true;
1943 }
1944 
1945 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1946 {
1947     unsigned rt = a->t;
1948     TCGv_i64 tmp = dest_gpr(ctx, rt);
1949     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1950     save_gpr(ctx, rt, tmp);
1951 
1952     cond_free(&ctx->null_cond);
1953     return true;
1954 }
1955 
1956 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1957 {
1958     unsigned rt = a->t;
1959     unsigned rs = a->sp;
1960     TCGv_i64 t0 = tcg_temp_new_i64();
1961 
1962     load_spr(ctx, t0, rs);
1963     tcg_gen_shri_i64(t0, t0, 32);
1964 
1965     save_gpr(ctx, rt, t0);
1966 
1967     cond_free(&ctx->null_cond);
1968     return true;
1969 }
1970 
1971 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1972 {
1973     unsigned rt = a->t;
1974     unsigned ctl = a->r;
1975     TCGv_i64 tmp;
1976 
1977     switch (ctl) {
1978     case CR_SAR:
1979         if (a->e == 0) {
1980             /* MFSAR without ,W masks low 5 bits.  */
1981             tmp = dest_gpr(ctx, rt);
1982             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1983             save_gpr(ctx, rt, tmp);
1984             goto done;
1985         }
1986         save_gpr(ctx, rt, cpu_sar);
1987         goto done;
1988     case CR_IT: /* Interval Timer */
1989         /* FIXME: Respect PSW_S bit.  */
1990         nullify_over(ctx);
1991         tmp = dest_gpr(ctx, rt);
1992         if (translator_io_start(&ctx->base)) {
1993             gen_helper_read_interval_timer(tmp);
1994             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1995         } else {
1996             gen_helper_read_interval_timer(tmp);
1997         }
1998         save_gpr(ctx, rt, tmp);
1999         return nullify_end(ctx);
2000     case 26:
2001     case 27:
2002         break;
2003     default:
2004         /* All other control registers are privileged.  */
2005         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2006         break;
2007     }
2008 
2009     tmp = tcg_temp_new_i64();
2010     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2011     save_gpr(ctx, rt, tmp);
2012 
2013  done:
2014     cond_free(&ctx->null_cond);
2015     return true;
2016 }
2017 
2018 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2019 {
2020     unsigned rr = a->r;
2021     unsigned rs = a->sp;
2022     TCGv_i64 tmp;
2023 
2024     if (rs >= 5) {
2025         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2026     }
2027     nullify_over(ctx);
2028 
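         /* Space IDs live in the high half of the 64-bit SR slot; the
            shift up here mirrors the shift down in trans_mfsp.  */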
2029     tmp = tcg_temp_new_i64();
2030     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2031 
2032     if (rs >= 4) {
2033         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2034         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2035     } else {
2036         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2037     }
2038 
2039     return nullify_end(ctx);
2040 }
2041 
2042 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2043 {
2044     unsigned ctl = a->t;
2045     TCGv_i64 reg;
2046     TCGv_i64 tmp;
2047 
2048     if (ctl == CR_SAR) {
2049         reg = load_gpr(ctx, a->r);
2050         tmp = tcg_temp_new_i64();
2051         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2052         save_or_nullify(ctx, cpu_sar, tmp);
2053 
2054         cond_free(&ctx->null_cond);
2055         return true;
2056     }
2057 
2058     /* All other control registers are privileged or read-only.  */
2059     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2060 
2061 #ifndef CONFIG_USER_ONLY
2062     nullify_over(ctx);
2063     reg = load_gpr(ctx, a->r);
2064 
2065     switch (ctl) {
2066     case CR_IT:
2067         gen_helper_write_interval_timer(tcg_env, reg);
2068         break;
2069     case CR_EIRR:
2070         gen_helper_write_eirr(tcg_env, reg);
2071         break;
2072     case CR_EIEM:
2073         gen_helper_write_eiem(tcg_env, reg);
2074         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2075         break;
2076 
2077     case CR_IIASQ:
2078     case CR_IIAOQ:
2079         /* FIXME: Respect PSW_Q bit */
2080         /* The write advances the queue and stores to the back element.  */
2081         tmp = tcg_temp_new_i64();
2082         tcg_gen_ld_i64(tmp, tcg_env,
2083                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2084         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2085         tcg_gen_st_i64(reg, tcg_env,
2086                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2087         break;
2088 
2089     case CR_PID1:
2090     case CR_PID2:
2091     case CR_PID3:
2092     case CR_PID4:
2093         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2094 #ifndef CONFIG_USER_ONLY
2095         gen_helper_change_prot_id(tcg_env);
2096 #endif
2097         break;
2098 
2099     default:
2100         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2101         break;
2102     }
2103     return nullify_end(ctx);
2104 #endif
2105 }
2106 
2107 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2108 {
2109     TCGv_i64 tmp = tcg_temp_new_i64();
2110 
2111     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2112     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2113     save_or_nullify(ctx, cpu_sar, tmp);
2114 
2115     cond_free(&ctx->null_cond);
2116     return true;
2117 }
2118 
2119 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2120 {
2121     TCGv_i64 dest = dest_gpr(ctx, a->t);
2122 
2123 #ifdef CONFIG_USER_ONLY
2124     /* We don't implement space registers in user mode. */
2125     tcg_gen_movi_i64(dest, 0);
2126 #else
2127     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2128     tcg_gen_shri_i64(dest, dest, 32);
2129 #endif
2130     save_gpr(ctx, a->t, dest);
2131 
2132     cond_free(&ctx->null_cond);
2133     return true;
2134 }
2135 
2136 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2137 {
2138     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2139 #ifndef CONFIG_USER_ONLY
2140     TCGv_i64 tmp;
2141 
2142     nullify_over(ctx);
2143 
2144     tmp = tcg_temp_new_i64();
2145     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2146     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2147     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2148     save_gpr(ctx, a->t, tmp);
2149 
2150     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2151     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2152     return nullify_end(ctx);
2153 #endif
2154 }
2155 
2156 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2157 {
2158     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2159 #ifndef CONFIG_USER_ONLY
2160     TCGv_i64 tmp;
2161 
2162     nullify_over(ctx);
2163 
2164     tmp = tcg_temp_new_i64();
2165     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2166     tcg_gen_ori_i64(tmp, tmp, a->i);
2167     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2168     save_gpr(ctx, a->t, tmp);
2169 
2170     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2171     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2172     return nullify_end(ctx);
2173 #endif
2174 }
2175 
2176 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2177 {
2178     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2179 #ifndef CONFIG_USER_ONLY
2180     TCGv_i64 tmp, reg;
2181     nullify_over(ctx);
2182 
2183     reg = load_gpr(ctx, a->r);
2184     tmp = tcg_temp_new_i64();
2185     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2186 
2187     /* Exit the TB to recognize new interrupts.  */
2188     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2189     return nullify_end(ctx);
2190 #endif
2191 }
2192 
2193 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2194 {
2195     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2196 #ifndef CONFIG_USER_ONLY
2197     nullify_over(ctx);
2198 
2199     if (rfi_r) {
2200         gen_helper_rfi_r(tcg_env);
2201     } else {
2202         gen_helper_rfi(tcg_env);
2203     }
2204     /* Exit the TB to recognize new interrupts.  */
2205     tcg_gen_exit_tb(NULL, 0);
2206     ctx->base.is_jmp = DISAS_NORETURN;
2207 
2208     return nullify_end(ctx);
2209 #endif
2210 }
2211 
2212 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2213 {
2214     return do_rfi(ctx, false);
2215 }
2216 
2217 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2218 {
2219     return do_rfi(ctx, true);
2220 }
2221 
2222 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2223 {
2224     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2225 #ifndef CONFIG_USER_ONLY
2226     nullify_over(ctx);
2227     gen_helper_halt(tcg_env);
2228     ctx->base.is_jmp = DISAS_NORETURN;
2229     return nullify_end(ctx);
2230 #endif
2231 }
2232 
2233 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2234 {
2235     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2236 #ifndef CONFIG_USER_ONLY
2237     nullify_over(ctx);
2238     gen_helper_reset(tcg_env);
2239     ctx->base.is_jmp = DISAS_NORETURN;
2240     return nullify_end(ctx);
2241 #endif
2242 }
2243 
2244 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2245 {
2246     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2247 #ifndef CONFIG_USER_ONLY
2248     nullify_over(ctx);
2249     gen_helper_getshadowregs(tcg_env);
2250     return nullify_end(ctx);
2251 #endif
2252 }
2253 
2254 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2255 {
2256     if (a->m) {
2257         TCGv_i64 dest = dest_gpr(ctx, a->b);
2258         TCGv_i64 src1 = load_gpr(ctx, a->b);
2259         TCGv_i64 src2 = load_gpr(ctx, a->x);
2260 
2261         /* The only thing we need to do is the base register modification.  */
2262         tcg_gen_add_i64(dest, src1, src2);
2263         save_gpr(ctx, a->b, dest);
2264     }
2265     cond_free(&ctx->null_cond);
2266     return true;
2267 }
2268 
2269 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2270 {
2271     TCGv_i64 dest, ofs;
2272     TCGv_i32 level, want;
2273     TCGv_i64 addr;
2274 
2275     nullify_over(ctx);
2276 
2277     dest = dest_gpr(ctx, a->t);
2278     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2279 
2280     if (a->imm) {
2281         level = tcg_constant_i32(a->ri);
2282     } else {
2283         level = tcg_temp_new_i32();
2284         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2285         tcg_gen_andi_i32(level, level, 3);
2286     }
2287     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2288 
2289     gen_helper_probe(dest, tcg_env, addr, level, want);
2290 
2291     save_gpr(ctx, a->t, dest);
2292     return nullify_end(ctx);
2293 }
2294 
2295 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2296 {
2297     if (ctx->is_pa20) {
2298         return false;
2299     }
2300     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2301 #ifndef CONFIG_USER_ONLY
2302     TCGv_i64 addr;
2303     TCGv_i64 ofs, reg;
2304 
2305     nullify_over(ctx);
2306 
2307     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2308     reg = load_gpr(ctx, a->r);
2309     if (a->addr) {
2310         gen_helper_itlba_pa11(tcg_env, addr, reg);
2311     } else {
2312         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2313     }
2314 
2315     /* Exit TB for TLB change if mmu is enabled.  */
2316     if (ctx->tb_flags & PSW_C) {
2317         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2318     }
2319     return nullify_end(ctx);
2320 #endif
2321 }
2322 
2323 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2324 {
2325     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2326 #ifndef CONFIG_USER_ONLY
2327     TCGv_i64 addr;
2328     TCGv_i64 ofs;
2329 
2330     nullify_over(ctx);
2331 
2332     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2333     if (a->m) {
2334         save_gpr(ctx, a->b, ofs);
2335     }
2336     if (a->local) {
2337         gen_helper_ptlbe(tcg_env);
2338     } else {
2339         gen_helper_ptlb(tcg_env, addr);
2340     }
2341 
2342     /* Exit TB for TLB change if mmu is enabled.  */
2343     if (ctx->tb_flags & PSW_C) {
2344         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2345     }
2346     return nullify_end(ctx);
2347 #endif
2348 }
2349 
2350 /*
2351  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2352  * See
2353  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2354  *     page 13-9 (195/206)
2355  */
2356 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2357 {
2358     if (ctx->is_pa20) {
2359         return false;
2360     }
2361     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2362 #ifndef CONFIG_USER_ONLY
2363     TCGv_i64 addr, atl, stl;
2364     TCGv_i64 reg;
2365 
2366     nullify_over(ctx);
2367 
2368     /*
2369      * FIXME:
2370      *  if (not (pcxl or pcxl2))
2371      *    return gen_illegal(ctx);
2372      */
2373 
2374     atl = tcg_temp_new_i64();
2375     stl = tcg_temp_new_i64();
2376     addr = tcg_temp_new_i64();
2377 
2378     tcg_gen_ld32u_i64(stl, tcg_env,
2379                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2380                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2381     tcg_gen_ld32u_i64(atl, tcg_env,
2382                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2383                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2384     tcg_gen_shli_i64(stl, stl, 32);
2385     tcg_gen_or_i64(addr, atl, stl);
2386 
2387     reg = load_gpr(ctx, a->r);
2388     if (a->addr) {
2389         gen_helper_itlba_pa11(tcg_env, addr, reg);
2390     } else {
2391         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2392     }
2393 
2394     /* Exit TB for TLB change if mmu is enabled.  */
2395     if (ctx->tb_flags & PSW_C) {
2396         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2397     }
2398     return nullify_end(ctx);
2399 #endif
2400 }
2401 
2402 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2403 {
2404     if (!ctx->is_pa20) {
2405         return false;
2406     }
2407     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2408 #ifndef CONFIG_USER_ONLY
2409     nullify_over(ctx);
2410     {
2411         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2412         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2413 
2414         if (a->data) {
2415             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2416         } else {
2417             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2418         }
2419     }
2420     /* Exit TB for TLB change if mmu is enabled.  */
2421     if (ctx->tb_flags & PSW_C) {
2422         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2423     }
2424     return nullify_end(ctx);
2425 #endif
2426 }
2427 
2428 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2429 {
2430     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2431 #ifndef CONFIG_USER_ONLY
2432     TCGv_i64 vaddr;
2433     TCGv_i64 ofs, paddr;
2434 
2435     nullify_over(ctx);
2436 
2437     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2438 
2439     paddr = tcg_temp_new_i64();
2440     gen_helper_lpa(paddr, tcg_env, vaddr);
2441 
2442     /* Note that the physical address result overrides base modification.  */
2443     if (a->m) {
2444         save_gpr(ctx, a->b, ofs);
2445     }
2446     save_gpr(ctx, a->t, paddr);
2447 
2448     return nullify_end(ctx);
2449 #endif
2450 }
2451 
2452 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2453 {
2454     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2455 
2456     /* The Coherence Index is an implementation-defined function of the
2457        physical address.  Two addresses with the same CI have a coherent
2458        view of the cache.  Our implementation returns 0 for all addresses,
2459        since the entire address space is coherent.  */
2460     save_gpr(ctx, a->t, ctx->zero);
2461 
2462     cond_free(&ctx->null_cond);
2463     return true;
2464 }
2465 
2466 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2467 {
2468     return do_add_reg(ctx, a, false, false, false, false);
2469 }
2470 
2471 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2472 {
2473     return do_add_reg(ctx, a, true, false, false, false);
2474 }
2475 
2476 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2477 {
2478     return do_add_reg(ctx, a, false, true, false, false);
2479 }
2480 
2481 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2482 {
2483     return do_add_reg(ctx, a, false, false, false, true);
2484 }
2485 
2486 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2487 {
2488     return do_add_reg(ctx, a, false, true, false, true);
2489 }
2490 
2491 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2492 {
2493     return do_sub_reg(ctx, a, false, false, false);
2494 }
2495 
2496 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2497 {
2498     return do_sub_reg(ctx, a, true, false, false);
2499 }
2500 
2501 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2502 {
2503     return do_sub_reg(ctx, a, false, false, true);
2504 }
2505 
2506 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2507 {
2508     return do_sub_reg(ctx, a, true, false, true);
2509 }
2510 
2511 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2512 {
2513     return do_sub_reg(ctx, a, false, true, false);
2514 }
2515 
2516 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2517 {
2518     return do_sub_reg(ctx, a, true, true, false);
2519 }
2520 
2521 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2522 {
2523     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2524 }
2525 
2526 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2527 {
2528     return do_log_reg(ctx, a, tcg_gen_and_i64);
2529 }
2530 
2531 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2532 {
2533     if (a->cf == 0) {
2534         unsigned r2 = a->r2;
2535         unsigned r1 = a->r1;
2536         unsigned rt = a->t;
2537 
2538         if (rt == 0) { /* NOP */
2539             cond_free(&ctx->null_cond);
2540             return true;
2541         }
2542         if (r2 == 0) { /* COPY */
2543             if (r1 == 0) {
2544                 TCGv_i64 dest = dest_gpr(ctx, rt);
2545                 tcg_gen_movi_i64(dest, 0);
2546                 save_gpr(ctx, rt, dest);
2547             } else {
2548                 save_gpr(ctx, rt, cpu_gr[r1]);
2549             }
2550             cond_free(&ctx->null_cond);
2551             return true;
2552         }
2553 #ifndef CONFIG_USER_ONLY
2554         /* These are QEMU extensions and are nops in the real architecture:
2555          *
2556          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2557          * or %r31,%r31,%r31 -- death loop; offline cpu
2558          *                      currently implemented as idle.
2559          */
2560         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2561             /* No need to check for supervisor, as userland can only pause
2562                until the next timer interrupt.  */
2563             nullify_over(ctx);
2564 
2565             /* Advance the instruction queue.  */
2566             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2567             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2568             nullify_set(ctx, 0);
2569 
2570             /* Tell the qemu main loop to halt until this cpu has work.  */
2571             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2572                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2573             gen_excp_1(EXCP_HALTED);
2574             ctx->base.is_jmp = DISAS_NORETURN;
2575 
2576             return nullify_end(ctx);
2577         }
2578 #endif
2579     }
2580     return do_log_reg(ctx, a, tcg_gen_or_i64);
2581 }
2582 
2583 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2584 {
2585     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2586 }
2587 
2588 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2589 {
2590     TCGv_i64 tcg_r1, tcg_r2;
2591 
2592     if (a->cf) {
2593         nullify_over(ctx);
2594     }
2595     tcg_r1 = load_gpr(ctx, a->r1);
2596     tcg_r2 = load_gpr(ctx, a->r2);
2597     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2598     return nullify_end(ctx);
2599 }
2600 
2601 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2602 {
2603     TCGv_i64 tcg_r1, tcg_r2;
2604 
2605     if (a->cf) {
2606         nullify_over(ctx);
2607     }
2608     tcg_r1 = load_gpr(ctx, a->r1);
2609     tcg_r2 = load_gpr(ctx, a->r2);
2610     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2611     return nullify_end(ctx);
2612 }
2613 
2614 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2615 {
2616     TCGv_i64 tcg_r1, tcg_r2, tmp;
2617 
2618     if (a->cf) {
2619         nullify_over(ctx);
2620     }
2621     tcg_r1 = load_gpr(ctx, a->r1);
2622     tcg_r2 = load_gpr(ctx, a->r2);
2623     tmp = tcg_temp_new_i64();
2624     tcg_gen_not_i64(tmp, tcg_r2);
2625     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2626     return nullify_end(ctx);
2627 }
2628 
2629 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2630 {
2631     return do_uaddcm(ctx, a, false);
2632 }
2633 
2634 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2635 {
2636     return do_uaddcm(ctx, a, true);
2637 }
2638 
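     /*
      * DCOR applies the BCD fixup after a binary SUB: PSW[CB] holds the
      * per-nibble carries, and each nibble that produced no carry needs
      * 6 subtracted (the ,i form instead adds 6 where a carry did occur).
      * Shifting the carries down 3 puts one carry bit at the bottom of
      * each nibble; masking with 0x1111... and multiplying by 6 builds
      * all of the per-digit corrections at once.  E.g. BCD 0x42 - 0x17
      * yields binary 0x2b with no carry out of the low nibble, and
      * 0x2b - 0x06 = 0x25, the decimal answer.
      */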
2639 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2640 {
2641     TCGv_i64 tmp;
2642 
2643     nullify_over(ctx);
2644 
2645     tmp = tcg_temp_new_i64();
2646     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2647     if (!is_i) {
2648         tcg_gen_not_i64(tmp, tmp);
2649     }
2650     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2651     tcg_gen_muli_i64(tmp, tmp, 6);
2652     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2653             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2654     return nullify_end(ctx);
2655 }
2656 
2657 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2658 {
2659     return do_dcor(ctx, a, false);
2660 }
2661 
2662 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2663 {
2664     return do_dcor(ctx, a, true);
2665 }
2666 
2667 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2668 {
2669     TCGv_i64 dest, add1, add2, addc, in1, in2;
2670     TCGv_i64 cout;
2671 
2672     nullify_over(ctx);
2673 
2674     in1 = load_gpr(ctx, a->r1);
2675     in2 = load_gpr(ctx, a->r2);
2676 
2677     add1 = tcg_temp_new_i64();
2678     add2 = tcg_temp_new_i64();
2679     addc = tcg_temp_new_i64();
2680     dest = tcg_temp_new_i64();
2681 
2682     /* Form R1 << 1 | PSW[CB]{8}.  */
2683     tcg_gen_add_i64(add1, in1, in1);
2684     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2685 
2686     /*
2687      * Add or subtract R2, depending on PSW[V].  Proper computation of
2688      * carry requires that we subtract via + ~R2 + 1, as described in
2689      * the manual.  By extracting and masking V, we can produce the
2690      * proper inputs to the addition without movcond.
2691      */
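         /* Concretely: sextract yields addc = -1 when PSW[V] is set, so
            add2 = in2 ^ -1 = ~in2 and (addc & 1) supplies the +1, giving
            add1 + ~in2 + 1 = add1 - in2.  With PSW[V] clear, addc = 0
            and the result is a plain add1 + in2.  */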
2692     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2693     tcg_gen_xor_i64(add2, in2, addc);
2694     tcg_gen_andi_i64(addc, addc, 1);
2695 
2696     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2697     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2698                      addc, ctx->zero);
2699 
2700     /* Write back the result register.  */
2701     save_gpr(ctx, a->t, dest);
2702 
2703     /* Write back PSW[CB].  */
2704     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2705     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2706 
2707     /* Write back PSW[V] for the division step.  */
2708     cout = get_psw_carry(ctx, false);
2709     tcg_gen_neg_i64(cpu_psw_v, cout);
2710     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2711 
2712     /* Install the new nullification.  */
2713     if (a->cf) {
2714         TCGv_i64 sv = NULL;
2715         if (cond_need_sv(a->cf >> 1)) {
2716             /* ??? The lshift is supposed to contribute to overflow.  */
2717             sv = do_add_sv(ctx, dest, add1, add2);
2718         }
2719         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2720     }
2721 
2722     return nullify_end(ctx);
2723 }
2724 
2725 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2726 {
2727     return do_add_imm(ctx, a, false, false);
2728 }
2729 
2730 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2731 {
2732     return do_add_imm(ctx, a, true, false);
2733 }
2734 
2735 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2736 {
2737     return do_add_imm(ctx, a, false, true);
2738 }
2739 
2740 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2741 {
2742     return do_add_imm(ctx, a, true, true);
2743 }
2744 
2745 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2746 {
2747     return do_sub_imm(ctx, a, false);
2748 }
2749 
2750 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2751 {
2752     return do_sub_imm(ctx, a, true);
2753 }
2754 
2755 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2756 {
2757     TCGv_i64 tcg_im, tcg_r2;
2758 
2759     if (a->cf) {
2760         nullify_over(ctx);
2761     }
2762 
2763     tcg_im = tcg_constant_i64(a->i);
2764     tcg_r2 = load_gpr(ctx, a->r);
2765     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2766 
2767     return nullify_end(ctx);
2768 }
2769 
2770 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2771                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2772 {
2773     TCGv_i64 r1, r2, dest;
2774 
2775     if (!ctx->is_pa20) {
2776         return false;
2777     }
2778 
2779     nullify_over(ctx);
2780 
2781     r1 = load_gpr(ctx, a->r1);
2782     r2 = load_gpr(ctx, a->r2);
2783     dest = dest_gpr(ctx, a->t);
2784 
2785     fn(dest, r1, r2);
2786     save_gpr(ctx, a->t, dest);
2787 
2788     return nullify_end(ctx);
2789 }
2790 
2791 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2792                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2793 {
2794     TCGv_i64 r, dest;
2795 
2796     if (!ctx->is_pa20) {
2797         return false;
2798     }
2799 
2800     nullify_over(ctx);
2801 
2802     r = load_gpr(ctx, a->r);
2803     dest = dest_gpr(ctx, a->t);
2804 
2805     fn(dest, r, a->i);
2806     save_gpr(ctx, a->t, dest);
2807 
2808     return nullify_end(ctx);
2809 }
2810 
2811 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2812                                 void (*fn)(TCGv_i64, TCGv_i64,
2813                                            TCGv_i64, TCGv_i32))
2814 {
2815     TCGv_i64 r1, r2, dest;
2816 
2817     if (!ctx->is_pa20) {
2818         return false;
2819     }
2820 
2821     nullify_over(ctx);
2822 
2823     r1 = load_gpr(ctx, a->r1);
2824     r2 = load_gpr(ctx, a->r2);
2825     dest = dest_gpr(ctx, a->t);
2826 
2827     fn(dest, r1, r2, tcg_constant_i32(a->sh));
2828     save_gpr(ctx, a->t, dest);
2829 
2830     return nullify_end(ctx);
2831 }
2832 
2833 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2834 {
2835     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2836 }
2837 
2838 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2839 {
2840     return do_multimedia(ctx, a, gen_helper_hadd_ss);
2841 }
2842 
2843 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2844 {
2845     return do_multimedia(ctx, a, gen_helper_hadd_us);
2846 }
2847 
2848 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2849 {
2850     return do_multimedia(ctx, a, gen_helper_havg);
2851 }
2852 
2853 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2854 {
2855     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2856 }
2857 
2858 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2859 {
2860     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2861 }
2862 
2863 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2864 {
2865     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2866 }
2867 
2868 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2869 {
2870     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2871 }
2872 
2873 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2874 {
2875     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2876 }
2877 
2878 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2879 {
2880     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2881 }
2882 
2883 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2884 {
2885     return do_multimedia(ctx, a, gen_helper_hsub_ss);
2886 }
2887 
2888 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2889 {
2890     return do_multimedia(ctx, a, gen_helper_hsub_us);
2891 }
2892 
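     /*
      * The MIX insns interleave alternating 16-bit lanes of the two
      * sources.  Writing each register as four halfwords left to right,
      * r1 = A0 A1 A2 A3 and r2 = B0 B1 B2 B3:
      *    mixh,l -> A0 B0 A2 B2    (even lanes; gen_mixh_l)
      *    mixh,r -> A1 B1 A3 B3    (odd lanes; gen_mixh_r)
      */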
2893 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2894 {
2895     uint64_t mask = 0xffff0000ffff0000ull;
2896     TCGv_i64 tmp = tcg_temp_new_i64();
2897 
2898     tcg_gen_andi_i64(tmp, r2, mask);
2899     tcg_gen_andi_i64(dst, r1, mask);
2900     tcg_gen_shri_i64(tmp, tmp, 16);
2901     tcg_gen_or_i64(dst, dst, tmp);
2902 }
2903 
2904 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2905 {
2906     return do_multimedia(ctx, a, gen_mixh_l);
2907 }
2908 
2909 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2910 {
2911     uint64_t mask = 0x0000ffff0000ffffull;
2912     TCGv_i64 tmp = tcg_temp_new_i64();
2913 
2914     tcg_gen_andi_i64(tmp, r1, mask);
2915     tcg_gen_andi_i64(dst, r2, mask);
2916     tcg_gen_shli_i64(tmp, tmp, 16);
2917     tcg_gen_or_i64(dst, dst, tmp);
2918 }
2919 
2920 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2921 {
2922     return do_multimedia(ctx, a, gen_mixh_r);
2923 }
2924 
2925 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2926 {
2927     TCGv_i64 tmp = tcg_temp_new_i64();
2928 
2929     tcg_gen_shri_i64(tmp, r2, 32);
2930     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2931 }
2932 
2933 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2934 {
2935     return do_multimedia(ctx, a, gen_mixw_l);
2936 }
2937 
2938 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2939 {
2940     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
2941 }
2942 
2943 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
2944 {
2945     return do_multimedia(ctx, a, gen_mixw_r);
2946 }
2947 
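     /*
      * PERMH rearranges the four halfwords of r1: with lanes numbered
      * 0..3 from the left, the result is lane c0, c1, c2, c3.  Each
      * extract below pulls one selected lane to the bottom; the deposit
      * chain then reassembles the lanes high to low.
      */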
2948 static bool trans_permh(DisasContext *ctx, arg_permh *a)
2949 {
2950     TCGv_i64 r, t0, t1, t2, t3;
2951 
2952     if (!ctx->is_pa20) {
2953         return false;
2954     }
2955 
2956     nullify_over(ctx);
2957 
2958     r = load_gpr(ctx, a->r1);
2959     t0 = tcg_temp_new_i64();
2960     t1 = tcg_temp_new_i64();
2961     t2 = tcg_temp_new_i64();
2962     t3 = tcg_temp_new_i64();
2963 
2964     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
2965     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
2966     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
2967     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
2968 
2969     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
2970     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
2971     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
2972 
2973     save_gpr(ctx, a->t, t0);
2974     return nullify_end(ctx);
2975 }
2976 
2977 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2978 {
2979     if (!ctx->is_pa20 && a->size > MO_32) {
2980         return gen_illegal(ctx);
2981     }
2982     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2983                    a->disp, a->sp, a->m, a->size | MO_TE);
2984 }
2985 
2986 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2987 {
2988     assert(a->x == 0 && a->scale == 0);
2989     if (!ctx->is_pa20 && a->size > MO_32) {
2990         return gen_illegal(ctx);
2991     }
2992     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2993 }
2994 
2995 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2996 {
2997     MemOp mop = MO_TE | MO_ALIGN | a->size;
2998     TCGv_i64 dest, ofs;
2999     TCGv_i64 addr;
3000 
3001     if (!ctx->is_pa20 && a->size > MO_32) {
3002         return gen_illegal(ctx);
3003     }
3004 
3005     nullify_over(ctx);
3006 
3007     if (a->m) {
3008         /* Base register modification.  Make sure that if RT == RB,
3009            we still see the result of the load.  */
3010         dest = tcg_temp_new_i64();
3011     } else {
3012         dest = dest_gpr(ctx, a->t);
3013     }
3014 
3015     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3016              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3017 
3018     /*
3019      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3020      * However, actual hardware succeeds when aligned mod 4.
3021      * Detect this case and log a GUEST_ERROR.
3022      *
3023      * TODO: HPPA64 relaxes the over-alignment requirement
3024      * with the ,co completer.
3025      */
3026     gen_helper_ldc_check(addr);
3027 
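         /* LDCW returns the old memory word and stores zero as a single
            atomic operation; the exchange with ctx->zero provides
            exactly that.  */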
3028     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3029 
3030     if (a->m) {
3031         save_gpr(ctx, a->b, ofs);
3032     }
3033     save_gpr(ctx, a->t, dest);
3034 
3035     return nullify_end(ctx);
3036 }
3037 
3038 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3039 {
3040     TCGv_i64 ofs, val;
3041     TCGv_i64 addr;
3042 
3043     nullify_over(ctx);
3044 
3045     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3046              ctx->mmu_idx == MMU_PHYS_IDX);
3047     val = load_gpr(ctx, a->r);
3048     if (a->a) {
3049         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3050             gen_helper_stby_e_parallel(tcg_env, addr, val);
3051         } else {
3052             gen_helper_stby_e(tcg_env, addr, val);
3053         }
3054     } else {
3055         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3056             gen_helper_stby_b_parallel(tcg_env, addr, val);
3057         } else {
3058             gen_helper_stby_b(tcg_env, addr, val);
3059         }
3060     }
3061     if (a->m) {
3062         tcg_gen_andi_i64(ofs, ofs, ~3);
3063         save_gpr(ctx, a->b, ofs);
3064     }
3065 
3066     return nullify_end(ctx);
3067 }
3068 
3069 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3070 {
3071     TCGv_i64 ofs, val;
3072     TCGv_i64 addr;
3073 
3074     if (!ctx->is_pa20) {
3075         return false;
3076     }
3077     nullify_over(ctx);
3078 
3079     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3080              ctx->mmu_idx == MMU_PHYS_IDX);
3081     val = load_gpr(ctx, a->r);
3082     if (a->a) {
3083         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3084             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3085         } else {
3086             gen_helper_stdby_e(tcg_env, addr, val);
3087         }
3088     } else {
3089         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3090             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3091         } else {
3092             gen_helper_stdby_b(tcg_env, addr, val);
3093         }
3094     }
3095     if (a->m) {
3096         tcg_gen_andi_i64(ofs, ofs, ~7);
3097         save_gpr(ctx, a->b, ofs);
3098     }
3099 
3100     return nullify_end(ctx);
3101 }
3102 
3103 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3104 {
3105     int hold_mmu_idx = ctx->mmu_idx;
3106 
3107     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3108     ctx->mmu_idx = MMU_PHYS_IDX;
3109     trans_ld(ctx, a);
3110     ctx->mmu_idx = hold_mmu_idx;
3111     return true;
3112 }
3113 
3114 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3115 {
3116     int hold_mmu_idx = ctx->mmu_idx;
3117 
3118     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3119     ctx->mmu_idx = MMU_PHYS_IDX;
3120     trans_st(ctx, a);
3121     ctx->mmu_idx = hold_mmu_idx;
3122     return true;
3123 }
3124 
3125 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3126 {
3127     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3128 
3129     tcg_gen_movi_i64(tcg_rt, a->i);
3130     save_gpr(ctx, a->t, tcg_rt);
3131     cond_free(&ctx->null_cond);
3132     return true;
3133 }
3134 
3135 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3136 {
3137     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3138     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3139 
3140     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3141     save_gpr(ctx, 1, tcg_r1);
3142     cond_free(&ctx->null_cond);
3143     return true;
3144 }
3145 
3146 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3147 {
3148     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3149 
3150     /* Special case rb == 0, for the LDI pseudo-op.
3151        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3152     if (a->b == 0) {
3153         tcg_gen_movi_i64(tcg_rt, a->i);
3154     } else {
3155         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3156     }
3157     save_gpr(ctx, a->t, tcg_rt);
3158     cond_free(&ctx->null_cond);
3159     return true;
3160 }
3161 
3162 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3163                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3164 {
3165     TCGv_i64 dest, in2, sv;
3166     DisasCond cond;
3167 
3168     in2 = load_gpr(ctx, r);
3169     dest = tcg_temp_new_i64();
3170 
3171     tcg_gen_sub_i64(dest, in1, in2);
3172 
3173     sv = NULL;
3174     if (cond_need_sv(c)) {
3175         sv = do_sub_sv(ctx, dest, in1, in2);
3176     }
3177 
3178     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3179     return do_cbranch(ctx, disp, n, &cond);
3180 }
3181 
3182 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3183 {
3184     if (!ctx->is_pa20 && a->d) {
3185         return false;
3186     }
3187     nullify_over(ctx);
3188     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3189                    a->c, a->f, a->d, a->n, a->disp);
3190 }
3191 
3192 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3193 {
3194     if (!ctx->is_pa20 && a->d) {
3195         return false;
3196     }
3197     nullify_over(ctx);
3198     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3199                    a->c, a->f, a->d, a->n, a->disp);
3200 }
3201 
3202 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3203                     unsigned c, unsigned f, unsigned n, int disp)
3204 {
3205     TCGv_i64 dest, in2, sv, cb_cond;
3206     DisasCond cond;
3207     bool d = false;
3208 
3209     /*
3210      * For hppa64, the ADDB conditions change with PSW.W,
3211      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3212      */
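         /* E.g. condition 5 (ZNV) becomes c = 1 with d = true, i.e. the
            double-word EQ test.  */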
3213     if (ctx->tb_flags & PSW_W) {
3214         d = c >= 5;
3215         if (d) {
3216             c &= 3;
3217         }
3218     }
3219 
3220     in2 = load_gpr(ctx, r);
3221     dest = tcg_temp_new_i64();
3222     sv = NULL;
3223     cb_cond = NULL;
3224 
3225     if (cond_need_cb(c)) {
3226         TCGv_i64 cb = tcg_temp_new_i64();
3227         TCGv_i64 cb_msb = tcg_temp_new_i64();
3228 
3229         tcg_gen_movi_i64(cb_msb, 0);
3230         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3231         tcg_gen_xor_i64(cb, in1, in2);
3232         tcg_gen_xor_i64(cb, cb, dest);
3233         cb_cond = get_carry(ctx, d, cb, cb_msb);
3234     } else {
3235         tcg_gen_add_i64(dest, in1, in2);
3236     }
3237     if (cond_need_sv(c)) {
3238         sv = do_add_sv(ctx, dest, in1, in2);
3239     }
3240 
3241     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3242     save_gpr(ctx, r, dest);
3243     return do_cbranch(ctx, disp, n, &cond);
3244 }
3245 
3246 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3247 {
3248     nullify_over(ctx);
3249     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3250 }
3251 
3252 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3253 {
3254     nullify_over(ctx);
3255     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3256 }
3257 
3258 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3259 {
3260     TCGv_i64 tmp, tcg_r;
3261     DisasCond cond;
3262 
3263     nullify_over(ctx);
3264 
3265     tmp = tcg_temp_new_i64();
3266     tcg_r = load_gpr(ctx, a->r);
3267     if (cond_need_ext(ctx, a->d)) {
3268         /* Force shift into [32,63] */
3269         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3270         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3271     } else {
3272         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3273     }
3274 
3275     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3276     return do_cbranch(ctx, a->disp, a->n, &cond);
3277 }
3278 
3279 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3280 {
3281     TCGv_i64 tmp, tcg_r;
3282     DisasCond cond;
3283     int p;
3284 
3285     nullify_over(ctx);
3286 
3287     tmp = tcg_temp_new_i64();
3288     tcg_r = load_gpr(ctx, a->r);
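         /* Shift big-endian bit P up into bit 63 (adding 32 in narrow
            mode), where the sign-based LT/GE condition below tests it.  */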
3289     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3290     tcg_gen_shli_i64(tmp, tcg_r, p);
3291 
3292     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3293     return do_cbranch(ctx, a->disp, a->n, &cond);
3294 }
3295 
3296 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3297 {
3298     TCGv_i64 dest;
3299     DisasCond cond;
3300 
3301     nullify_over(ctx);
3302 
3303     dest = dest_gpr(ctx, a->r2);
3304     if (a->r1 == 0) {
3305         tcg_gen_movi_i64(dest, 0);
3306     } else {
3307         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3308     }
3309 
3310     /* All MOVB conditions are 32-bit. */
3311     cond = do_sed_cond(ctx, a->c, false, dest);
3312     return do_cbranch(ctx, a->disp, a->n, &cond);
3313 }
3314 
3315 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3316 {
3317     TCGv_i64 dest;
3318     DisasCond cond;
3319 
3320     nullify_over(ctx);
3321 
3322     dest = dest_gpr(ctx, a->r);
3323     tcg_gen_movi_i64(dest, a->i);
3324 
3325     /* All MOVBI conditions are 32-bit. */
3326     cond = do_sed_cond(ctx, a->c, false, dest);
3327     return do_cbranch(ctx, a->disp, a->n, &cond);
3328 }
3329 
3330 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3331 {
3332     TCGv_i64 dest, src2;
3333 
3334     if (!ctx->is_pa20 && a->d) {
3335         return false;
3336     }
3337     if (a->c) {
3338         nullify_over(ctx);
3339     }
3340 
3341     dest = dest_gpr(ctx, a->t);
3342     src2 = load_gpr(ctx, a->r2);
3343     if (a->r1 == 0) {
3344         if (a->d) {
3345             tcg_gen_shr_i64(dest, src2, cpu_sar);
3346         } else {
3347             TCGv_i64 tmp = tcg_temp_new_i64();
3348 
3349             tcg_gen_ext32u_i64(dest, src2);
3350             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3351             tcg_gen_shr_i64(dest, dest, tmp);
3352         }
3353     } else if (a->r1 == a->r2) {
3354         if (a->d) {
3355             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3356         } else {
3357             TCGv_i32 t32 = tcg_temp_new_i32();
3358             TCGv_i32 s32 = tcg_temp_new_i32();
3359 
3360             tcg_gen_extrl_i64_i32(t32, src2);
3361             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3362             tcg_gen_andi_i32(s32, s32, 31);
3363             tcg_gen_rotr_i32(t32, t32, s32);
3364             tcg_gen_extu_i32_i64(dest, t32);
3365         }
3366     } else {
3367         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3368 
3369         if (a->d) {
3370             TCGv_i64 t = tcg_temp_new_i64();
3371             TCGv_i64 n = tcg_temp_new_i64();
3372 
3373             tcg_gen_xori_i64(n, cpu_sar, 63);
3374             tcg_gen_shl_i64(t, src2, n);
3375             tcg_gen_shli_i64(t, t, 1);
3376             tcg_gen_shr_i64(dest, src1, cpu_sar);
3377             tcg_gen_or_i64(dest, dest, t);
3378         } else {
3379             TCGv_i64 t = tcg_temp_new_i64();
3380             TCGv_i64 s = tcg_temp_new_i64();
3381 
3382             tcg_gen_concat32_i64(t, src2, src1);
3383             tcg_gen_andi_i64(s, cpu_sar, 31);
3384             tcg_gen_shr_i64(dest, t, s);
3385         }
3386     }
3387     save_gpr(ctx, a->t, dest);
3388 
3389     /* Install the new nullification.  */
3390     cond_free(&ctx->null_cond);
3391     if (a->c) {
3392         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3393     }
3394     return nullify_end(ctx);
3395 }
3396 
3397 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3398 {
3399     unsigned width, sa;
3400     TCGv_i64 dest, t2;
3401 
3402     if (!ctx->is_pa20 && a->d) {
3403         return false;
3404     }
3405     if (a->c) {
3406         nullify_over(ctx);
3407     }
3408 
3409     width = a->d ? 64 : 32;
3410     sa = width - 1 - a->cpos;
3411 
3412     dest = dest_gpr(ctx, a->t);
3413     t2 = load_gpr(ctx, a->r2);
3414     if (a->r1 == 0) {
3415         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3416     } else if (width == TARGET_LONG_BITS) {
3417         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3418     } else {
3419         assert(!a->d);
3420         if (a->r1 == a->r2) {
3421             TCGv_i32 t32 = tcg_temp_new_i32();
3422             tcg_gen_extrl_i64_i32(t32, t2);
3423             tcg_gen_rotri_i32(t32, t32, sa);
3424             tcg_gen_extu_i32_i64(dest, t32);
3425         } else {
3426             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3427             tcg_gen_extract_i64(dest, dest, sa, 32);
3428         }
3429     }
3430     save_gpr(ctx, a->t, dest);
3431 
3432     /* Install the new nullification.  */
3433     cond_free(&ctx->null_cond);
3434     if (a->c) {
3435         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3436     }
3437     return nullify_end(ctx);
3438 }
3439 
3440 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3441 {
3442     unsigned widthm1 = a->d ? 63 : 31;
3443     TCGv_i64 dest, src, tmp;
3444 
3445     if (!ctx->is_pa20 && a->d) {
3446         return false;
3447     }
3448     if (a->c) {
3449         nullify_over(ctx);
3450     }
3451 
3452     dest = dest_gpr(ctx, a->t);
3453     src = load_gpr(ctx, a->r);
3454     tmp = tcg_temp_new_i64();
3455 
3456     /* Recall that SAR is using big-endian bit numbering.  */
3457     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3458     tcg_gen_xori_i64(tmp, tmp, widthm1);
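         /* For a 2**n - 1 mask M, (x & M) ^ M == M - (x & M), so TMP is
            now widthm1 - SAR: the right-shift amount that brings
            big-endian bit SAR down to bit 0.  */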
3459 
3460     if (a->se) {
3461         if (!a->d) {
3462             tcg_gen_ext32s_i64(dest, src);
3463             src = dest;
3464         }
3465         tcg_gen_sar_i64(dest, src, tmp);
3466         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3467     } else {
3468         if (!a->d) {
3469             tcg_gen_ext32u_i64(dest, src);
3470             src = dest;
3471         }
3472         tcg_gen_shr_i64(dest, src, tmp);
3473         tcg_gen_extract_i64(dest, dest, 0, a->len);
3474     }
3475     save_gpr(ctx, a->t, dest);
3476 
3477     /* Install the new nullification.  */
3478     cond_free(&ctx->null_cond);
3479     if (a->c) {
3480         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3481     }
3482     return nullify_end(ctx);
3483 }
3484 
3485 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3486 {
3487     unsigned len, cpos, width;
3488     TCGv_i64 dest, src;
3489 
3490     if (!ctx->is_pa20 && a->d) {
3491         return false;
3492     }
3493     if (a->c) {
3494         nullify_over(ctx);
3495     }
3496 
3497     len = a->len;
3498     width = a->d ? 64 : 32;
3499     cpos = width - 1 - a->pos;
3500     if (cpos + len > width) {
3501         len = width - cpos;
3502     }
3503 
3504     dest = dest_gpr(ctx, a->t);
3505     src = load_gpr(ctx, a->r);
3506     if (a->se) {
3507         tcg_gen_sextract_i64(dest, src, cpos, len);
3508     } else {
3509         tcg_gen_extract_i64(dest, src, cpos, len);
3510     }
3511     save_gpr(ctx, a->t, dest);
3512 
3513     /* Install the new nullification.  */
3514     cond_free(&ctx->null_cond);
3515     if (a->c) {
3516         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3517     }
3518     return nullify_end(ctx);
3519 }
3520 
3521 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3522 {
3523     unsigned len, width;
3524     uint64_t mask0, mask1;
3525     TCGv_i64 dest;
3526 
3527     if (!ctx->is_pa20 && a->d) {
3528         return false;
3529     }
3530     if (a->c) {
3531         nullify_over(ctx);
3532     }
3533 
3534     len = a->len;
3535     width = a->d ? 64 : 32;
3536     if (a->cpos + len > width) {
3537         len = width - a->cpos;
3538     }
3539 
3540     dest = dest_gpr(ctx, a->t);
3541     mask0 = deposit64(0, a->cpos, len, a->i);
3542     mask1 = deposit64(-1, a->cpos, len, a->i);
3543 
3544     if (a->nz) {
3545         TCGv_i64 src = load_gpr(ctx, a->t);
3546         tcg_gen_andi_i64(dest, src, mask1);
3547         tcg_gen_ori_i64(dest, dest, mask0);
3548     } else {
3549         tcg_gen_movi_i64(dest, mask0);
3550     }
3551     save_gpr(ctx, a->t, dest);
3552 
3553     /* Install the new nullification.  */
3554     cond_free(&ctx->null_cond);
3555     if (a->c) {
3556         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3557     }
3558     return nullify_end(ctx);
3559 }
3560 
3561 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3562 {
3563     unsigned rs = a->nz ? a->t : 0;
3564     unsigned len, width;
3565     TCGv_i64 dest, val;
3566 
3567     if (!ctx->is_pa20 && a->d) {
3568         return false;
3569     }
3570     if (a->c) {
3571         nullify_over(ctx);
3572     }
3573 
3574     len = a->len;
3575     width = a->d ? 64 : 32;
3576     if (a->cpos + len > width) {
3577         len = width - a->cpos;
3578     }
3579 
3580     dest = dest_gpr(ctx, a->t);
3581     val = load_gpr(ctx, a->r);
3582     if (rs == 0) {
3583         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3584     } else {
3585         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3586     }
3587     save_gpr(ctx, a->t, dest);
3588 
3589     /* Install the new nullification.  */
3590     cond_free(&ctx->null_cond);
3591     if (a->c) {
3592         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3593     }
3594     return nullify_end(ctx);
3595 }
3596 
3597 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3598                        bool d, bool nz, unsigned len, TCGv_i64 val)
3599 {
3600     unsigned rs = nz ? rt : 0;
3601     unsigned widthm1 = d ? 63 : 31;
3602     TCGv_i64 mask, tmp, shift, dest;
3603     uint64_t msb = 1ULL << (len - 1);
3604 
3605     dest = dest_gpr(ctx, rt);
3606     shift = tcg_temp_new_i64();
3607     tmp = tcg_temp_new_i64();
3608 
3609     /* Convert big-endian bit numbering in SAR to left-shift.  */
3610     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3611     tcg_gen_xori_i64(shift, shift, widthm1);
3612 
3613     mask = tcg_temp_new_i64();
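         /* msb + (msb - 1) == (1 << len) - 1, a mask of the low LEN bits,
            written without a shift that could overflow for large LEN.  */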
3614     tcg_gen_movi_i64(mask, msb + (msb - 1));
3615     tcg_gen_and_i64(tmp, val, mask);
3616     if (rs) {
3617         tcg_gen_shl_i64(mask, mask, shift);
3618         tcg_gen_shl_i64(tmp, tmp, shift);
3619         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3620         tcg_gen_or_i64(dest, dest, tmp);
3621     } else {
3622         tcg_gen_shl_i64(dest, tmp, shift);
3623     }
3624     save_gpr(ctx, rt, dest);
3625 
3626     /* Install the new nullification.  */
3627     cond_free(&ctx->null_cond);
3628     if (c) {
3629         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3630     }
3631     return nullify_end(ctx);
3632 }
3633 
3634 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3635 {
3636     if (!ctx->is_pa20 && a->d) {
3637         return false;
3638     }
3639     if (a->c) {
3640         nullify_over(ctx);
3641     }
3642     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3643                       load_gpr(ctx, a->r));
3644 }
3645 
3646 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3647 {
3648     if (!ctx->is_pa20 && a->d) {
3649         return false;
3650     }
3651     if (a->c) {
3652         nullify_over(ctx);
3653     }
3654     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3655                       tcg_constant_i64(a->i));
3656 }
3657 
3658 static bool trans_be(DisasContext *ctx, arg_be *a)
3659 {
3660     TCGv_i64 tmp;
3661 
3662 #ifdef CONFIG_USER_ONLY
3663     /* ??? It seems like there should be a good way of using
3664        "be disp(sr2, r0)", the canonical gateway entry mechanism
3665        to our advantage.  But that appears to be inconvenient to
3666        manage alongside branch delay slots.  Therefore we handle
3667        entry into the gateway page via absolute address.  */
3668     /* Since we don't implement spaces, just branch.  Do notice the special
3669        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3670        goto_tb to the TB containing the syscall.  */
3671     if (a->b == 0) {
3672         return do_dbranch(ctx, a->disp, a->l, a->n);
3673     }
3674 #else
3675     nullify_over(ctx);
3676 #endif
3677 
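         /* Compute the branch target as GR[b] + disp, then let
            do_ibranch_priv apply the privilege rules to the result.  */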
3678     tmp = tcg_temp_new_i64();
3679     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3680     tmp = do_ibranch_priv(ctx, tmp);
3681 
3682 #ifdef CONFIG_USER_ONLY
3683     return do_ibranch(ctx, tmp, a->l, a->n);
3684 #else
3685     TCGv_i64 new_spc = tcg_temp_new_i64();
3686 
3687     load_spr(ctx, new_spc, a->sp);
3688     if (a->l) {
3689         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3690         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3691     }
3692     if (a->n && use_nullify_skip(ctx)) {
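             /* Nullification is statically true, so the delay slot is
                skipped: point both IAOQ words into the new space, at the
                target and target + 4.  */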
3693         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3694         tcg_gen_addi_i64(tmp, tmp, 4);
3695         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3696         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3697         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3698     } else {
3699         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3700         if (ctx->iaoq_b == -1) {
3701             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3702         }
3703         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3704         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3705         nullify_set(ctx, a->n);
3706     }
3707     tcg_gen_lookup_and_goto_ptr();
3708     ctx->base.is_jmp = DISAS_NORETURN;
3709     return nullify_end(ctx);
3710 #endif
3711 }
3712 
3713 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3714 {
3715     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3716 }
3717 
3718 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3719 {
3720     uint64_t dest = iaoq_dest(ctx, a->disp);
3721 
3722     nullify_over(ctx);
3723 
3724     /* Make sure the caller hasn't done something weird with the queue.
3725      * ??? This is not quite the same as the PSW[B] bit, which would be
3726      * expensive to track.  Real hardware will trap for
3727      *    b  gateway
3728      *    b  gateway+4  (in delay slot of first branch)
3729      * However, checking for a non-sequential instruction queue *will*
3730      * diagnose the security hole
3731      *    b  gateway
3732      *    b  evil
3733      * in which instructions at evil would run with increased privs.
3734      */
3735     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3736         return gen_illegal(ctx);
3737     }
3738 
3739 #ifndef CONFIG_USER_ONLY
3740     if (ctx->tb_flags & PSW_C) {
3741         CPUHPPAState *env = cpu_env(ctx->cs);
3742         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3743         /* If we could not find a TLB entry, then we need to generate an
3744            ITLB miss exception so the kernel will provide it.
3745            The resulting TLB fill operation will invalidate this TB and
3746            we will re-translate, at which point we *will* be able to find
3747            the TLB entry and determine if this is in fact a gateway page.  */
3748         if (type < 0) {
3749             gen_excp(ctx, EXCP_ITLB_MISS);
3750             return true;
3751         }
3752         /* No change for non-gateway pages or for priv decrease.  */
3753         if (type >= 4 && type - 4 < ctx->privilege) {
3754             dest = deposit64(dest, 0, 2, type - 4);
3755         }
3756     } else {
3757         dest &= -4;  /* priv = 0 */
3758     }
3759 #endif
3760 
3761     if (a->l) {
3762         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3763         if (ctx->privilege < 3) {
3764             tcg_gen_andi_i64(tmp, tmp, -4);
3765         }
3766         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3767         save_gpr(ctx, a->l, tmp);
3768     }
3769 
3770     return do_dbranch(ctx, dest, 0, a->n);
3771 }
3772 
3773 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3774 {
3775     if (a->x) {
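             /* Scaled-index branch: target = IAOQ_Front + 8 + (GR[x] << 3).  */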
3776         TCGv_i64 tmp = tcg_temp_new_i64();
3777         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3778         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3779         /* The computation here never changes privilege level.  */
3780         return do_ibranch(ctx, tmp, a->l, a->n);
3781     } else {
3782         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3783         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3784     }
3785 }
3786 
3787 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3788 {
3789     TCGv_i64 dest;
3790 
3791     if (a->x == 0) {
3792         dest = load_gpr(ctx, a->b);
3793     } else {
3794         dest = tcg_temp_new_i64();
3795         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3796         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3797     }
3798     dest = do_ibranch_priv(ctx, dest);
3799     return do_ibranch(ctx, dest, 0, a->n);
3800 }
3801 
3802 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3803 {
3804     TCGv_i64 dest;
3805 
3806 #ifdef CONFIG_USER_ONLY
3807     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3808     return do_ibranch(ctx, dest, a->l, a->n);
3809 #else
3810     nullify_over(ctx);
3811     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3812 
3813     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3814     if (ctx->iaoq_b == -1) {
3815         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3816     }
3817     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3818     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3819     if (a->l) {
3820         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3821     }
3822     nullify_set(ctx, a->n);
3823     tcg_gen_lookup_and_goto_ptr();
3824     ctx->base.is_jmp = DISAS_NORETURN;
3825     return nullify_end(ctx);
3826 #endif
3827 }
3828 
3829 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3830 {
3831     /* All branch target stack instructions are implemented as nops. */
3832     return ctx->is_pa20;
3833 }
3834 
3835 /*
3836  * Float class 0
3837  */
3838 
3839 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3840 {
3841     tcg_gen_mov_i32(dst, src);
3842 }
3843 
3844 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3845 {
3846     uint64_t ret;
3847 
3848     if (ctx->is_pa20) {
3849         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3850     } else {
3851         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3852     }
3853 
3854     nullify_over(ctx);
3855     save_frd(0, tcg_constant_i64(ret));
3856     return nullify_end(ctx);
3857 }
3858 
3859 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3860 {
3861     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3862 }
3863 
3864 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3865 {
3866     tcg_gen_mov_i64(dst, src);
3867 }
3868 
3869 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3870 {
3871     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3872 }
3873 
3874 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3875 {
3876     tcg_gen_andi_i32(dst, src, INT32_MAX);
3877 }
3878 
3879 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3880 {
3881     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3882 }
3883 
3884 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3885 {
3886     tcg_gen_andi_i64(dst, src, INT64_MAX);
3887 }
3888 
3889 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3890 {
3891     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3892 }
3893 
3894 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3895 {
3896     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3897 }
3898 
3899 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3900 {
3901     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3902 }
3903 
3904 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3905 {
3906     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3907 }
3908 
3909 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3910 {
3911     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3912 }
3913 
3914 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3915 {
3916     tcg_gen_xori_i32(dst, src, INT32_MIN);
3917 }
3918 
3919 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3920 {
3921     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3922 }
3923 
3924 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3925 {
3926     tcg_gen_xori_i64(dst, src, INT64_MIN);
3927 }
3928 
3929 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3930 {
3931     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3932 }
3933 
3934 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3935 {
3936     tcg_gen_ori_i32(dst, src, INT32_MIN);
3937 }
3938 
3939 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3940 {
3941     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3942 }
3943 
3944 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3945 {
3946     tcg_gen_ori_i64(dst, src, INT64_MIN);
3947 }
3948 
3949 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3950 {
3951     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3952 }
3953 
3954 /*
3955  * Float class 1
3956  */
3957 
3958 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3959 {
3960     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3961 }
3962 
3963 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3964 {
3965     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3966 }
3967 
3968 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3969 {
3970     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3971 }
3972 
3973 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3974 {
3975     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3976 }
3977 
3978 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3979 {
3980     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3981 }
3982 
3983 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3984 {
3985     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3986 }
3987 
3988 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3989 {
3990     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3991 }
3992 
3993 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3994 {
3995     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3996 }
3997 
3998 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3999 {
4000     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4001 }
4002 
4003 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4004 {
4005     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4006 }
4007 
4008 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4009 {
4010     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4011 }
4012 
4013 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4014 {
4015     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4016 }
4017 
4018 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4019 {
4020     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4021 }
4022 
4023 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4024 {
4025     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4026 }
4027 
4028 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4029 {
4030     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4031 }
4032 
4033 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4034 {
4035     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4036 }
4037 
4038 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4039 {
4040     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4041 }
4042 
4043 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4044 {
4045     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4046 }
4047 
4048 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4049 {
4050     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4051 }
4052 
4053 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4054 {
4055     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4056 }
4057 
4058 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4059 {
4060     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4061 }
4062 
4063 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4064 {
4065     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4066 }
4067 
4068 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4069 {
4070     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4071 }
4072 
4073 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4074 {
4075     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4076 }
4077 
4078 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4079 {
4080     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4081 }
4082 
4083 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4084 {
4085     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4086 }
4087 
4088 /*
4089  * Float class 2
4090  */
4091 
4092 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4093 {
4094     TCGv_i32 ta, tb, tc, ty;
4095 
4096     nullify_over(ctx);
4097 
4098     ta = load_frw0_i32(a->r1);
4099     tb = load_frw0_i32(a->r2);
4100     ty = tcg_constant_i32(a->y);
4101     tc = tcg_constant_i32(a->c);
4102 
4103     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4104 
4105     return nullify_end(ctx);
4106 }
4107 
4108 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4109 {
4110     TCGv_i64 ta, tb;
4111     TCGv_i32 tc, ty;
4112 
4113     nullify_over(ctx);
4114 
4115     ta = load_frd0(a->r1);
4116     tb = load_frd0(a->r2);
4117     ty = tcg_constant_i32(a->y);
4118     tc = tcg_constant_i32(a->c);
4119 
4120     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4121 
4122     return nullify_end(ctx);
4123 }
4124 
4125 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4126 {
4127     TCGv_i64 t;
4128 
4129     nullify_over(ctx);
4130 
4131     t = tcg_temp_new_i64();
4132     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
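         /* fr0_shadow mirrors the FPSR status word; 0x4000000 selects
            bit 26, the compare (C) bit tested by the "simple" form.  */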
4133 
4134     if (a->y == 1) {
4135         int mask;
4136         bool inv = false;
4137 
4138         switch (a->c) {
4139         case 0: /* simple */
4140             tcg_gen_andi_i64(t, t, 0x4000000);
4141             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4142             goto done;
4143         case 2: /* rej */
4144             inv = true;
4145             /* fallthru */
4146         case 1: /* acc */
4147             mask = 0x43ff800;
4148             break;
4149         case 6: /* rej8 */
4150             inv = true;
4151             /* fallthru */
4152         case 5: /* acc8 */
4153             mask = 0x43f8000;
4154             break;
4155         case 9: /* acc6 */
4156             mask = 0x43e0000;
4157             break;
4158         case 13: /* acc4 */
4159             mask = 0x4380000;
4160             break;
4161         case 17: /* acc2 */
4162             mask = 0x4200000;
4163             break;
4164         default:
4165             gen_illegal(ctx);
4166             return true;
4167         }
4168         if (inv) {
4169             TCGv_i64 c = tcg_constant_i64(mask);
4170             tcg_gen_or_i64(t, t, c);
4171             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4172         } else {
4173             tcg_gen_andi_i64(t, t, mask);
4174             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4175         }
4176     } else {
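             /* Test a single queued compare bit, selected by y.  */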
4177         unsigned cbit = (a->y ^ 1) - 1;
4178 
4179         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4180         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4181     }
4182 
4183  done:
4184     return nullify_end(ctx);
4185 }
4186 
4187 /*
4188  * Float class 3
4189  */
4190 
4191 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4192 {
4193     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4194 }
4195 
4196 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4197 {
4198     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4199 }
4200 
4201 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4202 {
4203     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4204 }
4205 
4206 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4207 {
4208     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4209 }
4210 
4211 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4212 {
4213     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4214 }
4215 
4216 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4217 {
4218     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4219 }
4220 
4221 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4222 {
4223     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4224 }
4225 
4226 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4227 {
4228     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4229 }
4230 
4231 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4232 {
4233     TCGv_i64 x, y;
4234 
4235     nullify_over(ctx);
4236 
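         /* Unsigned 32-bit multiply with a full 64-bit result; the
            operands are zero-extended to 64 bits by load_frw0_i64.  */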
4237     x = load_frw0_i64(a->r1);
4238     y = load_frw0_i64(a->r2);
4239     tcg_gen_mul_i64(x, x, y);
4240     save_frd(a->t, x);
4241 
4242     return nullify_end(ctx);
4243 }
4244 
4245 /* Convert the fmpyadd single-precision register encodings to standard.  */
4246 static inline int fmpyadd_s_reg(unsigned r)
4247 {
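         /* Encodings 0-15 map to 16-31 and 16-31 map to 48-63: the left
            and right word halves of fr16-fr31 in the doubled numbering
            used by the 32-bit fp register accessors.  */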
4248     return (r & 16) * 2 + 16 + (r & 15);
4249 }
4250 
4251 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4252 {
4253     int tm = fmpyadd_s_reg(a->tm);
4254     int ra = fmpyadd_s_reg(a->ra);
4255     int ta = fmpyadd_s_reg(a->ta);
4256     int rm2 = fmpyadd_s_reg(a->rm2);
4257     int rm1 = fmpyadd_s_reg(a->rm1);
4258 
4259     nullify_over(ctx);
4260 
4261     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4262     do_fop_weww(ctx, ta, ta, ra,
4263                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4264 
4265     return nullify_end(ctx);
4266 }
4267 
4268 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4269 {
4270     return do_fmpyadd_s(ctx, a, false);
4271 }
4272 
4273 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4274 {
4275     return do_fmpyadd_s(ctx, a, true);
4276 }
4277 
4278 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4279 {
4280     nullify_over(ctx);
4281 
4282     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4283     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4284                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4285 
4286     return nullify_end(ctx);
4287 }
4288 
4289 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4290 {
4291     return do_fmpyadd_d(ctx, a, false);
4292 }
4293 
4294 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4295 {
4296     return do_fmpyadd_d(ctx, a, true);
4297 }
4298 
4299 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4300 {
4301     TCGv_i32 x, y, z;
4302 
4303     nullify_over(ctx);
4304     x = load_frw0_i32(a->rm1);
4305     y = load_frw0_i32(a->rm2);
4306     z = load_frw0_i32(a->ra3);
4307 
4308     if (a->neg) {
4309         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4310     } else {
4311         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4312     }
4313 
4314     save_frw_i32(a->t, x);
4315     return nullify_end(ctx);
4316 }
4317 
4318 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4319 {
4320     TCGv_i64 x, y, z;
4321 
4322     nullify_over(ctx);
4323     x = load_frd0(a->rm1);
4324     y = load_frd0(a->rm2);
4325     z = load_frd0(a->ra3);
4326 
4327     if (a->neg) {
4328         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4329     } else {
4330         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4331     }
4332 
4333     save_frd(a->t, x);
4334     return nullify_end(ctx);
4335 }
4336 
4337 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4338 {
4339     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4340 #ifndef CONFIG_USER_ONLY
4341     if (a->i == 0x100) {
4342         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4343         nullify_over(ctx);
4344         gen_helper_diag_btlb(tcg_env);
4345         return nullify_end(ctx);
4346     }
4347 #endif
4348     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4349     return true;
4350 }
4351 
4352 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4353 {
4354     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4355     int bound;
4356 
4357     ctx->cs = cs;
4358     ctx->tb_flags = ctx->base.tb->flags;
4359     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4360 
4361 #ifdef CONFIG_USER_ONLY
4362     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4363     ctx->mmu_idx = MMU_USER_IDX;
4364     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4365     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4366     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4367 #else
4368     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4369     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4370                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4371                     : MMU_PHYS_IDX);
4372 
4373     /* Recover the IAOQ values from the GVA + PRIV.  */
4374     uint64_t cs_base = ctx->base.tb->cs_base;
4375     uint64_t iasq_f = cs_base & ~0xffffffffull;
4376     int32_t diff = cs_base;
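         /* The high 32 bits of cs_base hold the front space id; the low
            32 bits hold the signed iaoq_b - iaoq_f displacement, where 0
            means the back of the queue is unknown.  */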
4377 
4378     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4379     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4380 #endif
4381     ctx->iaoq_n = -1;
4382     ctx->iaoq_n_var = NULL;
4383 
4384     ctx->zero = tcg_constant_i64(0);
4385 
4386     /* Bound the number of instructions by those left on the page.  */
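         /* TARGET_PAGE_MASK is negative, so pc_first | TARGET_PAGE_MASK
            is the sign-extended page offset; its negation is the number
            of bytes remaining on the page.  */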
4387     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4388     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4389 }
4390 
4391 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4392 {
4393     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4394 
4395     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4396     ctx->null_cond = cond_make_f();
4397     ctx->psw_n_nonzero = false;
4398     if (ctx->tb_flags & PSW_N) {
4399         ctx->null_cond.c = TCG_COND_ALWAYS;
4400         ctx->psw_n_nonzero = true;
4401     }
4402     ctx->null_lab = NULL;
4403 }
4404 
4405 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4406 {
4407     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4408 
4409     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4410 }
4411 
4412 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4413 {
4414     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4415     CPUHPPAState *env = cpu_env(cs);
4416     DisasJumpType ret;
4417 
4418     /* Execute one insn.  */
4419 #ifdef CONFIG_USER_ONLY
4420     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4421         do_page_zero(ctx);
4422         ret = ctx->base.is_jmp;
4423         assert(ret != DISAS_NEXT);
4424     } else
4425 #endif
4426     {
4427         /* Always fetch the insn, even if nullified, so that we check
4428            the page permissions for execute.  */
4429         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4430 
4431         /* Set up the IA queue for the next insn.
4432            This will be overwritten by a branch.  */
4433         if (ctx->iaoq_b == -1) {
4434             ctx->iaoq_n = -1;
4435             ctx->iaoq_n_var = tcg_temp_new_i64();
4436             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4437         } else {
4438             ctx->iaoq_n = ctx->iaoq_b + 4;
4439             ctx->iaoq_n_var = NULL;
4440         }
4441 
4442         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
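                 /* This insn is statically nullified: consume it without
                    decoding.  Nullification does not carry past one insn.  */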
4443             ctx->null_cond.c = TCG_COND_NEVER;
4444             ret = DISAS_NEXT;
4445         } else {
4446             ctx->insn = insn;
4447             if (!decode(ctx, insn)) {
4448                 gen_illegal(ctx);
4449             }
4450             ret = ctx->base.is_jmp;
4451             assert(ctx->null_lab == NULL);
4452         }
4453     }
4454 
4455     /* Advance the insn queue.  Note that this check also detects
4456        a privilege change within the instruction queue.  */
4457     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
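             /* If the branch target and its successor are both known at
                translation time, and nullification is statically known,
                chain directly to the target TB.  */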
4458         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4459             && use_goto_tb(ctx, ctx->iaoq_b)
4460             && (ctx->null_cond.c == TCG_COND_NEVER
4461                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4462             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4463             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4464             ctx->base.is_jmp = ret = DISAS_NORETURN;
4465         } else {
4466             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4467         }
4468     }
4469     ctx->iaoq_f = ctx->iaoq_b;
4470     ctx->iaoq_b = ctx->iaoq_n;
4471     ctx->base.pc_next += 4;
4472 
4473     switch (ret) {
4474     case DISAS_NORETURN:
4475     case DISAS_IAQ_N_UPDATED:
4476         break;
4477 
4478     case DISAS_NEXT:
4479     case DISAS_IAQ_N_STALE:
4480     case DISAS_IAQ_N_STALE_EXIT:
4481         if (ctx->iaoq_f == -1) {
4482             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4483             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4484 #ifndef CONFIG_USER_ONLY
4485             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4486 #endif
4487             nullify_save(ctx);
4488             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4489                                 ? DISAS_EXIT
4490                                 : DISAS_IAQ_N_UPDATED);
4491         } else if (ctx->iaoq_b == -1) {
4492             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4493         }
4494         break;
4495 
4496     default:
4497         g_assert_not_reached();
4498     }
4499 }
4500 
4501 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4502 {
4503     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4504     DisasJumpType is_jmp = ctx->base.is_jmp;
4505 
4506     switch (is_jmp) {
4507     case DISAS_NORETURN:
4508         break;
4509     case DISAS_TOO_MANY:
4510     case DISAS_IAQ_N_STALE:
4511     case DISAS_IAQ_N_STALE_EXIT:
4512         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4513         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4514         nullify_save(ctx);
4515         /* FALLTHRU */
4516     case DISAS_IAQ_N_UPDATED:
4517         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4518             tcg_gen_lookup_and_goto_ptr();
4519             break;
4520         }
4521         /* FALLTHRU */
4522     case DISAS_EXIT:
4523         tcg_gen_exit_tb(NULL, 0);
4524         break;
4525     default:
4526         g_assert_not_reached();
4527     }
4528 }
4529 
4530 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4531                               CPUState *cs, FILE *logfile)
4532 {
4533     target_ulong pc = dcbase->pc_first;
4534 
4535 #ifdef CONFIG_USER_ONLY
4536     switch (pc) {
4537     case 0x00:
4538         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4539         return;
4540     case 0xb0:
4541         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4542         return;
4543     case 0xe0:
4544         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4545         return;
4546     case 0x100:
4547         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4548         return;
4549     }
4550 #endif
4551 
4552     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4553     target_disas(logfile, cs, pc, dcbase->tb->size);
4554 }
4555 
4556 static const TranslatorOps hppa_tr_ops = {
4557     .init_disas_context = hppa_tr_init_disas_context,
4558     .tb_start           = hppa_tr_tb_start,
4559     .insn_start         = hppa_tr_insn_start,
4560     .translate_insn     = hppa_tr_translate_insn,
4561     .tb_stop            = hppa_tr_tb_stop,
4562     .disas_log          = hppa_tr_disas_log,
4563 };
4564 
4565 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4566                            target_ulong pc, void *host_pc)
4567 {
4568     DisasContext ctx;
4569     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4570 }
4571