/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;
    TCGOp *insn_start;

    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}
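
/*
 * That is, for the PA2.0 case above: an SSM/RSM immediate with
 * PSW_SM_W set picks up the architected PSW_W bit, and the SM-encoded
 * positions themselves (plus PSW_G) are then discarded.
 */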

/* The space register number is passed inverted, so that here a value
   of 0 means sr0 itself, rather than "infer the space from the base
   register" as it does elsewhere; see space_select().  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
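
/*
 * Reading the expression above: M:A = 1:0 yields 1 (post-modify),
 * M:A = 1:1 yields -1 (pre-modify), and M = 0 yields 0 (no base
 * register update).
 */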

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
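
/*
 * A worked check of assemble_6: for x = 1, y = 3 the field is
 * val = 0b100011 = 35, and (35 ^ 31) + 1 = 60 + 1 = 61, which matches
 * the official 32 * 1 + 32 - 3 = 61.
 */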

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not individual TCG globals, so that they can be
       indexed at runtime; see space_select().  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(ctx->insn_start != NULL);
    tcg_set_insn_start_param(ctx->insn_start, 2, breg);
    ctx->insn_start = NULL;
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
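
/*
 * A sketch of the usage pattern for nullify_over/nullify_end, for
 * orientation only (trans_foo and its body are hypothetical):
 *
 *     static bool trans_foo(DisasContext *ctx, arg_foo *a)
 *     {
 *         nullify_over(ctx);        // branch over the body if nullified
 *         ...emit TCG ops for foo...
 *         return nullify_end(ctx);  // close the bracket, fix up PSW[N]
 *     }
 */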

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
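
/*
 * Note on the CF encoding as consumed above: bits [3:1] select the
 * base condition and bit 0 inverts it, so e.g. cf = 3 ("<>") is the
 * inverse of cf = 2 ("=").
 */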

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,=,<), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
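
/*
 * For example, per the mapping above: orig = 6 gives c = 2, f = 1,
 * i.e. logical condition 5 (">="), while orig = 3 remaps to c = 7 with
 * f = 0, i.e. logical condition 14 (OD).
 */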

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
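
/*
 * A note on the SBZ/NBZ computation above: subtracting 0x01 in each
 * byte lane sets a lane's msb if that lane borrowed, i.e. was zero;
 * the andc with res then discards lanes whose msb was already set, so
 * the result under the 0x80... mask is nonzero exactly when some byte
 * of res is zero.  The SHZ/NHZ case is the same trick per halfword.
 */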

static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}
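
/*
 * In words: sv's sign bit is set exactly when in1 and in2 agree in
 * sign but res differs, the usual two's-complement overflow condition
 * for addition.
 */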

/* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}
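
/*
 * Likewise for subtraction: overflow happens exactly when in1 and in2
 * differ in sign and res takes the sign of in2; then both xors above
 * have their sign bits set.
 */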

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
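
/*
 * A note on the carry recovery in do_add/do_sub above: for x + y the
 * per-bit carry-in vector is x ^ y ^ result, hence the two xors in
 * do_add; subtraction is computed as x + ~y (+ 1), for which the same
 * identity gives ~(x ^ y) ^ dest, hence tcg_gen_eqv_i64 in the
 * non-borrow path.
 */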

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif
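
/*
 * Tracing space_select's arithmetic for the inferred case: with PSW_W
 * clear, a base whose top two bits are 0b10 yields (base >> 27) & 030
 * == 16, which is the byte offset of sr[6] within the sr[4..7] group
 * addressed from sr[4].
 */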

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
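
/*
 * To sketch the convention (operands hypothetical): a pre-modify
 * "ldw,mb" comes through with modify = -1, so the access uses the
 * updated address ofs = base + disp, while a post-modify "ldw,ma"
 * uses modify = 1 and accesses the original base; in both cases ofs
 * is written back to the base register.
 */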

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 ofs;
    TCGv_i64 addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new_i64();
    }
    do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    uint64_t dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
                       unsigned link, bool is_n)
{
    TCGv_i64 a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new_i64();
        tcg_gen_mov_i64(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
                tcg_gen_addi_i64(next, next, 4);
                copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
1783     } else if (is_n && use_nullify_skip(ctx)) {
1784         /* The (conditional) branch, B, nullifies the next insn, N,
1785           and we're allowed to skip execution of N (no single-step or
1786            tracepoint in effect).  Since the goto_ptr that we must use
1787            for the indirect branch consumes no special resources, we
1788            can (conditionally) skip B and continue execution.  */
1789         /* The use_nullify_skip test implies we have a known control path.  */
1790         tcg_debug_assert(ctx->iaoq_b != -1);
1791         tcg_debug_assert(ctx->iaoq_n != -1);
1792 
1793         /* We do have to handle the non-local temporary, DEST, before
1794           branching.  Since IAOQ_F is not really live at this point, we
1795            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1796         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1797         next = tcg_temp_new_i64();
1798         tcg_gen_addi_i64(next, dest, 4);
1799         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1800 
1801         nullify_over(ctx);
1802         if (link != 0) {
1803             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1804         }
1805         tcg_gen_lookup_and_goto_ptr();
1806         return nullify_end(ctx);
1807     } else {
1808         c = ctx->null_cond.c;
1809         a0 = ctx->null_cond.a0;
1810         a1 = ctx->null_cond.a1;
1811 
1812         tmp = tcg_temp_new_i64();
1813         next = tcg_temp_new_i64();
1814 
1815         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1816         tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1817         ctx->iaoq_n = -1;
1818         ctx->iaoq_n_var = next;
1819 
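             /* Update the link register only when the branch is actually
                executed, i.e. when the nullification condition is false.  */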
1820         if (link != 0) {
1821             tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1822         }
1823 
1824         if (is_n) {
1825             /* The branch nullifies the next insn, which means the state of N
1826                after the branch is the inverse of the state of N that applied
1827                to the branch.  */
1828             tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1829             cond_free(&ctx->null_cond);
1830             ctx->null_cond = cond_make_n();
1831             ctx->psw_n_nonzero = true;
1832         } else {
1833             cond_free(&ctx->null_cond);
1834         }
1835     }
1836     return true;
1837 }
1838 
1839 /* Implement
1840  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1841  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1842  *    else
1843  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1844  * which keeps the privilege level from being increased.
1845  */
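     /* A worked example (illustrative): with ctx->privilege == 2, a target
        address with low bits 0 is rewritten below to have low bits 2, which
        prevents a privilege increase, while a target with low bits 3 is used
        unchanged, since moving to a numerically larger (less privileged)
        level is always permitted.  */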
1846 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1847 {
1848     TCGv_i64 dest;
1849     switch (ctx->privilege) {
1850     case 0:
1851         /* Privilege 0 is maximum and is allowed to decrease.  */
1852         return offset;
1853     case 3:
1854         /* Privilege 3 is minimum and is never allowed to increase.  */
1855         dest = tcg_temp_new_i64();
1856         tcg_gen_ori_i64(dest, offset, 3);
1857         break;
1858     default:
1859         dest = tcg_temp_new_i64();
1860         tcg_gen_andi_i64(dest, offset, -4);
1861         tcg_gen_ori_i64(dest, dest, ctx->privilege);
1862         tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1863         break;
1864     }
1865     return dest;
1866 }
1867 
1868 #ifdef CONFIG_USER_ONLY
1869 /* On Linux, page zero is normally marked execute only + gateway.
1870    Therefore normal read or write is supposed to fail, but specific
1871    offsets have kernel code mapped to raise permissions to implement
1872    system calls.  Handling this via an explicit check here, rather
1873    than in the "be disp(sr2,r0)" instruction that probably sent us
1874    here, is the easiest way to handle the branch delay slot on the
1875    aforementioned BE.  */
1876 static void do_page_zero(DisasContext *ctx)
1877 {
1878     TCGv_i64 tmp;
1879 
1880     /* If by some means we get here with PSW[N]=1, that implies that
1881        the B,GATE instruction would be skipped, and we'd fault on the
1882        next insn within the privileged page.  */
1883     switch (ctx->null_cond.c) {
1884     case TCG_COND_NEVER:
1885         break;
1886     case TCG_COND_ALWAYS:
1887         tcg_gen_movi_i64(cpu_psw_n, 0);
1888         goto do_sigill;
1889     default:
1890         /* Since this is always the first (and only) insn within the
1891            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1892         g_assert_not_reached();
1893     }
1894 
1895     /* Check that we didn't arrive here via some means that allowed
1896        non-sequential instruction execution.  Normally the PSW[B] bit
1897        detects this by preventing the B,GATE instruction from executing
1898        under such conditions.  */
1899     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1900         goto do_sigill;
1901     }
1902 
1903     switch (ctx->iaoq_f & -4) {
1904     case 0x00: /* Null pointer call */
1905         gen_excp_1(EXCP_IMP);
1906         ctx->base.is_jmp = DISAS_NORETURN;
1907         break;
1908 
1909     case 0xb0: /* LWS */
1910         gen_excp_1(EXCP_SYSCALL_LWS);
1911         ctx->base.is_jmp = DISAS_NORETURN;
1912         break;
1913 
1914     case 0xe0: /* SET_THREAD_POINTER */
1915         tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1916         tmp = tcg_temp_new_i64();
1917         tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1918         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1919         tcg_gen_addi_i64(tmp, tmp, 4);
1920         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1921         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1922         break;
1923 
1924     case 0x100: /* SYSCALL */
1925         gen_excp_1(EXCP_SYSCALL);
1926         ctx->base.is_jmp = DISAS_NORETURN;
1927         break;
1928 
1929     default:
1930     do_sigill:
1931         gen_excp_1(EXCP_ILL);
1932         ctx->base.is_jmp = DISAS_NORETURN;
1933         break;
1934     }
1935 }
1936 #endif
1937 
1938 static bool trans_nop(DisasContext *ctx, arg_nop *a)
1939 {
1940     cond_free(&ctx->null_cond);
1941     return true;
1942 }
1943 
1944 static bool trans_break(DisasContext *ctx, arg_break *a)
1945 {
1946     return gen_excp_iir(ctx, EXCP_BREAK);
1947 }
1948 
1949 static bool trans_sync(DisasContext *ctx, arg_sync *a)
1950 {
1951     /* No point in nullifying the memory barrier.  */
1952     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1953 
1954     cond_free(&ctx->null_cond);
1955     return true;
1956 }
1957 
1958 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1959 {
1960     unsigned rt = a->t;
1961     TCGv_i64 tmp = dest_gpr(ctx, rt);
1962     tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1963     save_gpr(ctx, rt, tmp);
1964 
1965     cond_free(&ctx->null_cond);
1966     return true;
1967 }
1968 
1969 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1970 {
1971     unsigned rt = a->t;
1972     unsigned rs = a->sp;
1973     TCGv_i64 t0 = tcg_temp_new_i64();
1974 
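         /* Space register values are kept left-justified in the upper 32
            bits (see trans_mtsp below); shift back down for the GR result.  */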
1975     load_spr(ctx, t0, rs);
1976     tcg_gen_shri_i64(t0, t0, 32);
1977 
1978     save_gpr(ctx, rt, t0);
1979 
1980     cond_free(&ctx->null_cond);
1981     return true;
1982 }
1983 
1984 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1985 {
1986     unsigned rt = a->t;
1987     unsigned ctl = a->r;
1988     TCGv_i64 tmp;
1989 
1990     switch (ctl) {
1991     case CR_SAR:
1992         if (a->e == 0) {
1993             /* MFSAR without ,W masks low 5 bits.  */
1994             tmp = dest_gpr(ctx, rt);
1995             tcg_gen_andi_i64(tmp, cpu_sar, 31);
1996             save_gpr(ctx, rt, tmp);
1997             goto done;
1998         }
1999         save_gpr(ctx, rt, cpu_sar);
2000         goto done;
2001     case CR_IT: /* Interval Timer */
2002         /* FIXME: Respect PSW_S bit.  */
2003         nullify_over(ctx);
2004         tmp = dest_gpr(ctx, rt);
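             /* The helper is called on both paths; translator_io_start only
                decides whether the TB must end afterwards (e.g. under icount),
                hence DISAS_IAQ_N_STALE on that path.  */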
2005         if (translator_io_start(&ctx->base)) {
2006             gen_helper_read_interval_timer(tmp);
2007             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2008         } else {
2009             gen_helper_read_interval_timer(tmp);
2010         }
2011         save_gpr(ctx, rt, tmp);
2012         return nullify_end(ctx);
2013     case 26:
2014     case 27:
2015         break;
2016     default:
2017         /* All other control registers are privileged.  */
2018         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2019         break;
2020     }
2021 
2022     tmp = tcg_temp_new_i64();
2023     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2024     save_gpr(ctx, rt, tmp);
2025 
2026  done:
2027     cond_free(&ctx->null_cond);
2028     return true;
2029 }
2030 
2031 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2032 {
2033     unsigned rr = a->r;
2034     unsigned rs = a->sp;
2035     TCGv_i64 tmp;
2036 
2037     if (rs >= 5) {
2038         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2039     }
2040     nullify_over(ctx);
2041 
2042     tmp = tcg_temp_new_i64();
2043     tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2044 
2045     if (rs >= 4) {
2046         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2047         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2048     } else {
2049         tcg_gen_mov_i64(cpu_sr[rs], tmp);
2050     }
2051 
2052     return nullify_end(ctx);
2053 }
2054 
2055 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2056 {
2057     unsigned ctl = a->t;
2058     TCGv_i64 reg;
2059     TCGv_i64 tmp;
2060 
2061     if (ctl == CR_SAR) {
2062         reg = load_gpr(ctx, a->r);
2063         tmp = tcg_temp_new_i64();
2064         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2065         save_or_nullify(ctx, cpu_sar, tmp);
2066 
2067         cond_free(&ctx->null_cond);
2068         return true;
2069     }
2070 
2071     /* All other control registers are privileged or read-only.  */
2072     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2073 
2074 #ifndef CONFIG_USER_ONLY
2075     nullify_over(ctx);
2076 
2077     if (ctx->is_pa20) {
2078         reg = load_gpr(ctx, a->r);
2079     } else {
2080         reg = tcg_temp_new_i64();
2081         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2082     }
2083 
2084     switch (ctl) {
2085     case CR_IT:
2086         gen_helper_write_interval_timer(tcg_env, reg);
2087         break;
2088     case CR_EIRR:
2089         gen_helper_write_eirr(tcg_env, reg);
2090         break;
2091     case CR_EIEM:
2092         gen_helper_write_eiem(tcg_env, reg);
2093         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2094         break;
2095 
2096     case CR_IIASQ:
2097     case CR_IIAOQ:
2098         /* FIXME: Respect PSW_Q bit */
2099         /* The write advances the queue and stores to the back element.  */
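             /* E.g. (illustrative) two successive writes of A then B leave
                A in the front element and B in the back element.  */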
2100         tmp = tcg_temp_new_i64();
2101         tcg_gen_ld_i64(tmp, tcg_env,
2102                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2103         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2104         tcg_gen_st_i64(reg, tcg_env,
2105                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2106         break;
2107 
2108     case CR_PID1:
2109     case CR_PID2:
2110     case CR_PID3:
2111     case CR_PID4:
2112         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2114         gen_helper_change_prot_id(tcg_env);
2116         break;
2117 
2118     default:
2119         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2120         break;
2121     }
2122     return nullify_end(ctx);
2123 #endif
2124 }
2125 
2126 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2127 {
2128     TCGv_i64 tmp = tcg_temp_new_i64();
2129 
2130     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2131     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2132     save_or_nullify(ctx, cpu_sar, tmp);
2133 
2134     cond_free(&ctx->null_cond);
2135     return true;
2136 }
2137 
2138 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2139 {
2140     TCGv_i64 dest = dest_gpr(ctx, a->t);
2141 
2142 #ifdef CONFIG_USER_ONLY
2143     /* We don't implement space registers in user mode. */
2144     tcg_gen_movi_i64(dest, 0);
2145 #else
2146     tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2147     tcg_gen_shri_i64(dest, dest, 32);
2148 #endif
2149     save_gpr(ctx, a->t, dest);
2150 
2151     cond_free(&ctx->null_cond);
2152     return true;
2153 }
2154 
2155 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2156 {
2157     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2158 #ifndef CONFIG_USER_ONLY
2159     TCGv_i64 tmp;
2160 
2161     nullify_over(ctx);
2162 
2163     tmp = tcg_temp_new_i64();
2164     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2165     tcg_gen_andi_i64(tmp, tmp, ~a->i);
2166     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2167     save_gpr(ctx, a->t, tmp);
2168 
2169     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2170     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2171     return nullify_end(ctx);
2172 #endif
2173 }
2174 
2175 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2176 {
2177     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2178 #ifndef CONFIG_USER_ONLY
2179     TCGv_i64 tmp;
2180 
2181     nullify_over(ctx);
2182 
2183     tmp = tcg_temp_new_i64();
2184     tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2185     tcg_gen_ori_i64(tmp, tmp, a->i);
2186     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2187     save_gpr(ctx, a->t, tmp);
2188 
2189     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2190     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2191     return nullify_end(ctx);
2192 #endif
2193 }
2194 
2195 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2196 {
2197     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2198 #ifndef CONFIG_USER_ONLY
2199     TCGv_i64 tmp, reg;
2200     nullify_over(ctx);
2201 
2202     reg = load_gpr(ctx, a->r);
2203     tmp = tcg_temp_new_i64();
2204     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2205 
2206     /* Exit the TB to recognize new interrupts.  */
2207     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2208     return nullify_end(ctx);
2209 #endif
2210 }
2211 
2212 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2213 {
2214     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2215 #ifndef CONFIG_USER_ONLY
2216     nullify_over(ctx);
2217 
2218     if (rfi_r) {
2219         gen_helper_rfi_r(tcg_env);
2220     } else {
2221         gen_helper_rfi(tcg_env);
2222     }
2223     /* Exit the TB to recognize new interrupts.  */
2224     tcg_gen_exit_tb(NULL, 0);
2225     ctx->base.is_jmp = DISAS_NORETURN;
2226 
2227     return nullify_end(ctx);
2228 #endif
2229 }
2230 
2231 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2232 {
2233     return do_rfi(ctx, false);
2234 }
2235 
2236 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2237 {
2238     return do_rfi(ctx, true);
2239 }
2240 
2241 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2242 {
2243     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2244 #ifndef CONFIG_USER_ONLY
2245     nullify_over(ctx);
2246     gen_helper_halt(tcg_env);
2247     ctx->base.is_jmp = DISAS_NORETURN;
2248     return nullify_end(ctx);
2249 #endif
2250 }
2251 
2252 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2253 {
2254     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2255 #ifndef CONFIG_USER_ONLY
2256     nullify_over(ctx);
2257     gen_helper_reset(tcg_env);
2258     ctx->base.is_jmp = DISAS_NORETURN;
2259     return nullify_end(ctx);
2260 #endif
2261 }
2262 
2263 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2264 {
2265     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2266 #ifndef CONFIG_USER_ONLY
2267     nullify_over(ctx);
2268     gen_helper_getshadowregs(tcg_env);
2269     return nullify_end(ctx);
2270 #endif
2271 }
2272 
2273 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2274 {
2275     if (a->m) {
2276         TCGv_i64 dest = dest_gpr(ctx, a->b);
2277         TCGv_i64 src1 = load_gpr(ctx, a->b);
2278         TCGv_i64 src2 = load_gpr(ctx, a->x);
2279 
2280         /* The only thing we need to do is the base register modification.  */
2281         tcg_gen_add_i64(dest, src1, src2);
2282         save_gpr(ctx, a->b, dest);
2283     }
2284     cond_free(&ctx->null_cond);
2285     return true;
2286 }
2287 
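     /* PROBE[I] tests whether the page at the given address is accessible
        for read or write at the specified privilege level, setting GR[t] to
        1 on success and 0 on failure; the check itself is delegated to the
        probe helper.  */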
2288 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2289 {
2290     TCGv_i64 dest, ofs;
2291     TCGv_i32 level, want;
2292     TCGv_i64 addr;
2293 
2294     nullify_over(ctx);
2295 
2296     dest = dest_gpr(ctx, a->t);
2297     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2298 
2299     if (a->imm) {
2300         level = tcg_constant_i32(a->ri & 3);
2301     } else {
2302         level = tcg_temp_new_i32();
2303         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2304         tcg_gen_andi_i32(level, level, 3);
2305     }
2306     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2307 
2308     gen_helper_probe(dest, tcg_env, addr, level, want);
2309 
2310     save_gpr(ctx, a->t, dest);
2311     return nullify_end(ctx);
2312 }
2313 
2314 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2315 {
2316     if (ctx->is_pa20) {
2317         return false;
2318     }
2319     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2320 #ifndef CONFIG_USER_ONLY
2321     TCGv_i64 addr;
2322     TCGv_i64 ofs, reg;
2323 
2324     nullify_over(ctx);
2325 
2326     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2327     reg = load_gpr(ctx, a->r);
2328     if (a->addr) {
2329         gen_helper_itlba_pa11(tcg_env, addr, reg);
2330     } else {
2331         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2332     }
2333 
2334     /* Exit TB for TLB change if mmu is enabled.  */
2335     if (ctx->tb_flags & PSW_C) {
2336         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2337     }
2338     return nullify_end(ctx);
2339 #endif
2340 }
2341 
2342 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2343 {
2344     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2345 #ifndef CONFIG_USER_ONLY
2346     TCGv_i64 addr;
2347     TCGv_i64 ofs;
2348 
2349     nullify_over(ctx);
2350 
2351     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2352 
2353     /*
2354      * Page align now, rather than later, so that we can add in the
2355      * pa2.0 page_size field from the low 4 bits of GR[b].
2356      */
2357     tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2358     if (ctx->is_pa20) {
2359         tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2360     }
2361 
2362     if (local) {
2363         gen_helper_ptlb_l(tcg_env, addr);
2364     } else {
2365         gen_helper_ptlb(tcg_env, addr);
2366     }
2367 
2368     if (a->m) {
2369         save_gpr(ctx, a->b, ofs);
2370     }
2371 
2372     /* Exit TB for TLB change if mmu is enabled.  */
2373     if (ctx->tb_flags & PSW_C) {
2374         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2375     }
2376     return nullify_end(ctx);
2377 #endif
2378 }
2379 
2380 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2381 {
2382     return do_pxtlb(ctx, a, false);
2383 }
2384 
2385 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2386 {
2387     return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2388 }
2389 
2390 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2391 {
2392     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2393 #ifndef CONFIG_USER_ONLY
2394     nullify_over(ctx);
2395 
2396     trans_nop_addrx(ctx, a);
2397     gen_helper_ptlbe(tcg_env);
2398 
2399     /* Exit TB for TLB change if mmu is enabled.  */
2400     if (ctx->tb_flags & PSW_C) {
2401         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2402     }
2403     return nullify_end(ctx);
2404 #endif
2405 }
2406 
2407 /*
2408  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2409  * See
2410  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2411  *     page 13-9 (195/206)
2412  */
2413 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2414 {
2415     if (ctx->is_pa20) {
2416         return false;
2417     }
2418     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2419 #ifndef CONFIG_USER_ONLY
2420     TCGv_i64 addr, atl, stl;
2421     TCGv_i64 reg;
2422 
2423     nullify_over(ctx);
2424 
2425     /*
2426      * FIXME:
2427      *  if (not (pcxl or pcxl2))
2428      *    return gen_illegal(ctx);
2429      */
2430 
2431     atl = tcg_temp_new_i64();
2432     stl = tcg_temp_new_i64();
2433     addr = tcg_temp_new_i64();
2434 
2435     tcg_gen_ld32u_i64(stl, tcg_env,
2436                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2437                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2438     tcg_gen_ld32u_i64(atl, tcg_env,
2439                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2440                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2441     tcg_gen_shli_i64(stl, stl, 32);
2442     tcg_gen_or_i64(addr, atl, stl);
2443 
2444     reg = load_gpr(ctx, a->r);
2445     if (a->addr) {
2446         gen_helper_itlba_pa11(tcg_env, addr, reg);
2447     } else {
2448         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2449     }
2450 
2451     /* Exit TB for TLB change if mmu is enabled.  */
2452     if (ctx->tb_flags & PSW_C) {
2453         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2454     }
2455     return nullify_end(ctx);
2456 #endif
2457 }
2458 
2459 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2460 {
2461     if (!ctx->is_pa20) {
2462         return false;
2463     }
2464     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2465 #ifndef CONFIG_USER_ONLY
2466     nullify_over(ctx);
2467     {
2468         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2469         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2470 
2471         if (a->data) {
2472             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2473         } else {
2474             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2475         }
2476     }
2477     /* Exit TB for TLB change if mmu is enabled.  */
2478     if (ctx->tb_flags & PSW_C) {
2479         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2480     }
2481     return nullify_end(ctx);
2482 #endif
2483 }
2484 
2485 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2486 {
2487     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2488 #ifndef CONFIG_USER_ONLY
2489     TCGv_i64 vaddr;
2490     TCGv_i64 ofs, paddr;
2491 
2492     nullify_over(ctx);
2493 
2494     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2495 
2496     paddr = tcg_temp_new_i64();
2497     gen_helper_lpa(paddr, tcg_env, vaddr);
2498 
2499     /* Note that the physical address result overrides base modification.  */
2500     if (a->m) {
2501         save_gpr(ctx, a->b, ofs);
2502     }
2503     save_gpr(ctx, a->t, paddr);
2504 
2505     return nullify_end(ctx);
2506 #endif
2507 }
2508 
2509 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2510 {
2511     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2512 
2513     /* The Coherence Index is an implementation-defined function of the
2514        physical address.  Two addresses with the same CI have a coherent
2515        view of the cache.  Our implementation is to return 0 for all,
2516        since the entire address space is coherent.  */
2517     save_gpr(ctx, a->t, ctx->zero);
2518 
2519     cond_free(&ctx->null_cond);
2520     return true;
2521 }
2522 
2523 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2524 {
2525     return do_add_reg(ctx, a, false, false, false, false);
2526 }
2527 
2528 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2529 {
2530     return do_add_reg(ctx, a, true, false, false, false);
2531 }
2532 
2533 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2534 {
2535     return do_add_reg(ctx, a, false, true, false, false);
2536 }
2537 
2538 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2539 {
2540     return do_add_reg(ctx, a, false, false, false, true);
2541 }
2542 
2543 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2544 {
2545     return do_add_reg(ctx, a, false, true, false, true);
2546 }
2547 
2548 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2549 {
2550     return do_sub_reg(ctx, a, false, false, false);
2551 }
2552 
2553 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2554 {
2555     return do_sub_reg(ctx, a, true, false, false);
2556 }
2557 
2558 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2559 {
2560     return do_sub_reg(ctx, a, false, false, true);
2561 }
2562 
2563 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2564 {
2565     return do_sub_reg(ctx, a, true, false, true);
2566 }
2567 
2568 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2569 {
2570     return do_sub_reg(ctx, a, false, true, false);
2571 }
2572 
2573 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2574 {
2575     return do_sub_reg(ctx, a, true, true, false);
2576 }
2577 
2578 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2579 {
2580     return do_log_reg(ctx, a, tcg_gen_andc_i64);
2581 }
2582 
2583 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2584 {
2585     return do_log_reg(ctx, a, tcg_gen_and_i64);
2586 }
2587 
2588 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2589 {
2590     if (a->cf == 0) {
2591         unsigned r2 = a->r2;
2592         unsigned r1 = a->r1;
2593         unsigned rt = a->t;
2594 
2595         if (rt == 0) { /* NOP */
2596             cond_free(&ctx->null_cond);
2597             return true;
2598         }
2599         if (r2 == 0) { /* COPY */
2600             if (r1 == 0) {
2601                 TCGv_i64 dest = dest_gpr(ctx, rt);
2602                 tcg_gen_movi_i64(dest, 0);
2603                 save_gpr(ctx, rt, dest);
2604             } else {
2605                 save_gpr(ctx, rt, cpu_gr[r1]);
2606             }
2607             cond_free(&ctx->null_cond);
2608             return true;
2609         }
2610 #ifndef CONFIG_USER_ONLY
2611         /* These are QEMU extensions and are nops in the real architecture:
2612          *
2613          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2614          * or %r31,%r31,%r31 -- death loop; offline cpu
2615          *                      currently implemented as idle.
2616          */
2617         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2618             /* No need to check for supervisor, as userland can only pause
2619                until the next timer interrupt.  */
2620             nullify_over(ctx);
2621 
2622             /* Advance the instruction queue.  */
2623             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2624             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2625             nullify_set(ctx, 0);
2626 
2627             /* Tell the qemu main loop to halt until this cpu has work.  */
2628             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2629                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2630             gen_excp_1(EXCP_HALTED);
2631             ctx->base.is_jmp = DISAS_NORETURN;
2632 
2633             return nullify_end(ctx);
2634         }
2635 #endif
2636     }
2637     return do_log_reg(ctx, a, tcg_gen_or_i64);
2638 }
2639 
2640 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2641 {
2642     return do_log_reg(ctx, a, tcg_gen_xor_i64);
2643 }
2644 
2645 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2646 {
2647     TCGv_i64 tcg_r1, tcg_r2;
2648 
2649     if (a->cf) {
2650         nullify_over(ctx);
2651     }
2652     tcg_r1 = load_gpr(ctx, a->r1);
2653     tcg_r2 = load_gpr(ctx, a->r2);
2654     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2655     return nullify_end(ctx);
2656 }
2657 
2658 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2659 {
2660     TCGv_i64 tcg_r1, tcg_r2;
2661 
2662     if (a->cf) {
2663         nullify_over(ctx);
2664     }
2665     tcg_r1 = load_gpr(ctx, a->r1);
2666     tcg_r2 = load_gpr(ctx, a->r2);
2667     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2668     return nullify_end(ctx);
2669 }
2670 
2671 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2672 {
2673     TCGv_i64 tcg_r1, tcg_r2, tmp;
2674 
2675     if (a->cf) {
2676         nullify_over(ctx);
2677     }
2678     tcg_r1 = load_gpr(ctx, a->r1);
2679     tcg_r2 = load_gpr(ctx, a->r2);
2680     tmp = tcg_temp_new_i64();
2681     tcg_gen_not_i64(tmp, tcg_r2);
2682     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2683     return nullify_end(ctx);
2684 }
2685 
2686 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2687 {
2688     return do_uaddcm(ctx, a, false);
2689 }
2690 
2691 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2692 {
2693     return do_uaddcm(ctx, a, true);
2694 }
2695 
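     /* DCOR corrects the result of a binary addition of BCD operands.  A
        sketch of the usual sequence (illustrative): pre-bias one operand
        with 0x66...6, ADD, then DCOR subtracts 6 from each 4-bit digit
        whose carry bit in PSW[CB] is clear.  E.g. for BCD 08 + 05:
        0x08 + 0x66 = 0x6e, then 0x6e + 0x05 = 0x73 with a carry out of
        digit 0 only, and DCOR produces the BCD result 0x13.  */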
2696 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2697 {
2698     TCGv_i64 tmp;
2699 
2700     nullify_over(ctx);
2701 
2702     tmp = tcg_temp_new_i64();
2703     tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2704     if (!is_i) {
2705         tcg_gen_not_i64(tmp, tmp);
2706     }
2707     tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2708     tcg_gen_muli_i64(tmp, tmp, 6);
2709     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2710             is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2711     return nullify_end(ctx);
2712 }
2713 
2714 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2715 {
2716     return do_dcor(ctx, a, false);
2717 }
2718 
2719 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2720 {
2721     return do_dcor(ctx, a, true);
2722 }
2723 
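     /* DS is the divide-step primitive: each execution develops one bit of
        the quotient of a non-restoring division.  Software division (e.g.
        the $$divI millicode) typically primes PSW[V] and PSW[CB] and then
        issues 32 consecutive DS instructions for a 32-bit quotient
        (illustrative; see the architecture manual for the exact sequence).  */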
2724 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2725 {
2726     TCGv_i64 dest, add1, add2, addc, in1, in2;
2727     TCGv_i64 cout;
2728 
2729     nullify_over(ctx);
2730 
2731     in1 = load_gpr(ctx, a->r1);
2732     in2 = load_gpr(ctx, a->r2);
2733 
2734     add1 = tcg_temp_new_i64();
2735     add2 = tcg_temp_new_i64();
2736     addc = tcg_temp_new_i64();
2737     dest = tcg_temp_new_i64();
2738 
2739     /* Form R1 << 1 | PSW[CB]{8}.  */
2740     tcg_gen_add_i64(add1, in1, in1);
2741     tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2742 
2743     /*
2744      * Add or subtract R2, depending on PSW[V].  Proper computation of
2745      * carry requires that we subtract via + ~R2 + 1, as described in
2746      * the manual.  By extracting and masking V, we can produce the
2747      * proper inputs to the addition without movcond.
2748      */
2749     tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2750     tcg_gen_xor_i64(add2, in2, addc);
2751     tcg_gen_andi_i64(addc, addc, 1);
2752 
2753     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2754     tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2755                      addc, ctx->zero);
2756 
2757     /* Write back the result register.  */
2758     save_gpr(ctx, a->t, dest);
2759 
2760     /* Write back PSW[CB].  */
2761     tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2762     tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2763 
2764     /* Write back PSW[V] for the division step.  */
2765     cout = get_psw_carry(ctx, false);
2766     tcg_gen_neg_i64(cpu_psw_v, cout);
2767     tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2768 
2769     /* Install the new nullification.  */
2770     if (a->cf) {
2771         TCGv_i64 sv = NULL;
2772         if (cond_need_sv(a->cf >> 1)) {
2773             /* ??? The lshift is supposed to contribute to overflow.  */
2774             sv = do_add_sv(ctx, dest, add1, add2);
2775         }
2776         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2777     }
2778 
2779     return nullify_end(ctx);
2780 }
2781 
2782 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2783 {
2784     return do_add_imm(ctx, a, false, false);
2785 }
2786 
2787 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2788 {
2789     return do_add_imm(ctx, a, true, false);
2790 }
2791 
2792 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2793 {
2794     return do_add_imm(ctx, a, false, true);
2795 }
2796 
2797 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2798 {
2799     return do_add_imm(ctx, a, true, true);
2800 }
2801 
2802 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2803 {
2804     return do_sub_imm(ctx, a, false);
2805 }
2806 
2807 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2808 {
2809     return do_sub_imm(ctx, a, true);
2810 }
2811 
2812 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2813 {
2814     TCGv_i64 tcg_im, tcg_r2;
2815 
2816     if (a->cf) {
2817         nullify_over(ctx);
2818     }
2819 
2820     tcg_im = tcg_constant_i64(a->i);
2821     tcg_r2 = load_gpr(ctx, a->r);
2822     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2823 
2824     return nullify_end(ctx);
2825 }
2826 
2827 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2828                           void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2829 {
2830     TCGv_i64 r1, r2, dest;
2831 
2832     if (!ctx->is_pa20) {
2833         return false;
2834     }
2835 
2836     nullify_over(ctx);
2837 
2838     r1 = load_gpr(ctx, a->r1);
2839     r2 = load_gpr(ctx, a->r2);
2840     dest = dest_gpr(ctx, a->t);
2841 
2842     fn(dest, r1, r2);
2843     save_gpr(ctx, a->t, dest);
2844 
2845     return nullify_end(ctx);
2846 }
2847 
2848 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2849                              void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2850 {
2851     TCGv_i64 r, dest;
2852 
2853     if (!ctx->is_pa20) {
2854         return false;
2855     }
2856 
2857     nullify_over(ctx);
2858 
2859     r = load_gpr(ctx, a->r);
2860     dest = dest_gpr(ctx, a->t);
2861 
2862     fn(dest, r, a->i);
2863     save_gpr(ctx, a->t, dest);
2864 
2865     return nullify_end(ctx);
2866 }
2867 
2868 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2869                                 void (*fn)(TCGv_i64, TCGv_i64,
2870                                            TCGv_i64, TCGv_i32))
2871 {
2872     TCGv_i64 r1, r2, dest;
2873 
2874     if (!ctx->is_pa20) {
2875         return false;
2876     }
2877 
2878     nullify_over(ctx);
2879 
2880     r1 = load_gpr(ctx, a->r1);
2881     r2 = load_gpr(ctx, a->r2);
2882     dest = dest_gpr(ctx, a->t);
2883 
2884     fn(dest, r1, r2, tcg_constant_i32(a->sh));
2885     save_gpr(ctx, a->t, dest);
2886 
2887     return nullify_end(ctx);
2888 }
2889 
2890 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2891 {
2892     return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2893 }
2894 
2895 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2896 {
2897     return do_multimedia(ctx, a, gen_helper_hadd_ss);
2898 }
2899 
2900 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2901 {
2902     return do_multimedia(ctx, a, gen_helper_hadd_us);
2903 }
2904 
2905 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2906 {
2907     return do_multimedia(ctx, a, gen_helper_havg);
2908 }
2909 
2910 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2911 {
2912     return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2913 }
2914 
2915 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2916 {
2917     return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2918 }
2919 
2920 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2921 {
2922     return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2923 }
2924 
2925 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2926 {
2927     return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2928 }
2929 
2930 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2931 {
2932     return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2933 }
2934 
2935 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2936 {
2937     return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2938 }
2939 
2940 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2941 {
2942     return do_multimedia(ctx, a, gen_helper_hsub_ss);
2943 }
2944 
2945 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2946 {
2947     return do_multimedia(ctx, a, gen_helper_hsub_us);
2948 }
2949 
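     /* MIXH,L interleaves the left halfword of each 32-bit word of the two
        operands: e.g. (illustrative) r1 = 0x1111222233334444 and
        r2 = 0xaaaabbbbccccdddd give 0x1111aaaa3333cccc.  MIXH,R below does
        the same with the right halfwords, giving 0x2222bbbb4444dddd.  */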
2950 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2951 {
2952     uint64_t mask = 0xffff0000ffff0000ull;
2953     TCGv_i64 tmp = tcg_temp_new_i64();
2954 
2955     tcg_gen_andi_i64(tmp, r2, mask);
2956     tcg_gen_andi_i64(dst, r1, mask);
2957     tcg_gen_shri_i64(tmp, tmp, 16);
2958     tcg_gen_or_i64(dst, dst, tmp);
2959 }
2960 
2961 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2962 {
2963     return do_multimedia(ctx, a, gen_mixh_l);
2964 }
2965 
2966 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2967 {
2968     uint64_t mask = 0x0000ffff0000ffffull;
2969     TCGv_i64 tmp = tcg_temp_new_i64();
2970 
2971     tcg_gen_andi_i64(tmp, r1, mask);
2972     tcg_gen_andi_i64(dst, r2, mask);
2973     tcg_gen_shli_i64(tmp, tmp, 16);
2974     tcg_gen_or_i64(dst, dst, tmp);
2975 }
2976 
2977 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2978 {
2979     return do_multimedia(ctx, a, gen_mixh_r);
2980 }
2981 
2982 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2983 {
2984     TCGv_i64 tmp = tcg_temp_new_i64();
2985 
2986     tcg_gen_shri_i64(tmp, r2, 32);
2987     tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2988 }
2989 
2990 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2991 {
2992     return do_multimedia(ctx, a, gen_mixw_l);
2993 }
2994 
2995 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2996 {
2997     tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
2998 }
2999 
3000 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3001 {
3002     return do_multimedia(ctx, a, gen_mixw_r);
3003 }
3004 
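     /* PERMH rearranges the four halfwords of GR[r1] according to the
        indices c0..c3, numbered from the most significant halfword.
        E.g. (illustrative) c0..c3 = 0,0,1,1 applied to
        r = 0x1111222233334444 yields 0x1111111122222222.  */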
3005 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3006 {
3007     TCGv_i64 r, t0, t1, t2, t3;
3008 
3009     if (!ctx->is_pa20) {
3010         return false;
3011     }
3012 
3013     nullify_over(ctx);
3014 
3015     r = load_gpr(ctx, a->r1);
3016     t0 = tcg_temp_new_i64();
3017     t1 = tcg_temp_new_i64();
3018     t2 = tcg_temp_new_i64();
3019     t3 = tcg_temp_new_i64();
3020 
3021     tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3022     tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3023     tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3024     tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3025 
3026     tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3027     tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3028     tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3029 
3030     save_gpr(ctx, a->t, t0);
3031     return nullify_end(ctx);
3032 }
3033 
3034 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3035 {
3036     if (ctx->is_pa20) {
3037        /*
3038         * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3039         * Any base modification still occurs.
3040         */
3041         if (a->t == 0) {
3042             return trans_nop_addrx(ctx, a);
3043         }
3044     } else if (a->size > MO_32) {
3045         return gen_illegal(ctx);
3046     }
3047     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3048                    a->disp, a->sp, a->m, a->size | MO_TE);
3049 }
3050 
3051 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3052 {
3053     assert(a->x == 0 && a->scale == 0);
3054     if (!ctx->is_pa20 && a->size > MO_32) {
3055         return gen_illegal(ctx);
3056     }
3057     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3058 }
3059 
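     /* LDCW/LDCD are the PA-RISC atomic read-and-clear primitives: the old
        memory value is returned and zero is stored in its place, which maps
        directly onto the atomic xchg with zero below.  Linux builds its
        spinlocks from LDCW on 16-byte-aligned words, hence the alignment
        discussion below.  */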
3060 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3061 {
3062     MemOp mop = MO_TE | MO_ALIGN | a->size;
3063     TCGv_i64 dest, ofs;
3064     TCGv_i64 addr;
3065 
3066     if (!ctx->is_pa20 && a->size > MO_32) {
3067         return gen_illegal(ctx);
3068     }
3069 
3070     nullify_over(ctx);
3071 
3072     if (a->m) {
3073         /* Base register modification.  Make sure that if RT == RB,
3074            we see the result of the load.  */
3075         dest = tcg_temp_new_i64();
3076     } else {
3077         dest = dest_gpr(ctx, a->t);
3078     }
3079 
3080     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3081              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
3082 
3083     /*
3084      * For hppa1.1, LDCW is undefined unless aligned mod 16.
3085      * However, actual hardware succeeds when aligned mod 4.
3086      * Detect this case and log a GUEST_ERROR.
3087      *
3088      * TODO: HPPA64 relaxes the over-alignment requirement
3089      * with the ,co completer.
3090      */
3091     gen_helper_ldc_check(addr);
3092 
3093     tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3094 
3095     if (a->m) {
3096         save_gpr(ctx, a->b, ofs);
3097     }
3098     save_gpr(ctx, a->t, dest);
3099 
3100     return nullify_end(ctx);
3101 }
3102 
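     /* STBY stores only part of a word, selected by the low address bits;
        the begin (,B) and end (,E) forms are intended for the leading and
        trailing fragments of an unaligned store sequence.  With base
        modification, the address is rounded down to a word boundary.  */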
3103 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3104 {
3105     TCGv_i64 ofs, val;
3106     TCGv_i64 addr;
3107 
3108     nullify_over(ctx);
3109 
3110     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3111              ctx->mmu_idx == MMU_PHYS_IDX);
3112     val = load_gpr(ctx, a->r);
3113     if (a->a) {
3114         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3115             gen_helper_stby_e_parallel(tcg_env, addr, val);
3116         } else {
3117             gen_helper_stby_e(tcg_env, addr, val);
3118         }
3119     } else {
3120         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3121             gen_helper_stby_b_parallel(tcg_env, addr, val);
3122         } else {
3123             gen_helper_stby_b(tcg_env, addr, val);
3124         }
3125     }
3126     if (a->m) {
3127         tcg_gen_andi_i64(ofs, ofs, ~3);
3128         save_gpr(ctx, a->b, ofs);
3129     }
3130 
3131     return nullify_end(ctx);
3132 }
3133 
3134 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3135 {
3136     TCGv_i64 ofs, val;
3137     TCGv_i64 addr;
3138 
3139     if (!ctx->is_pa20) {
3140         return false;
3141     }
3142     nullify_over(ctx);
3143 
3144     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3145              ctx->mmu_idx == MMU_PHYS_IDX);
3146     val = load_gpr(ctx, a->r);
3147     if (a->a) {
3148         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3149             gen_helper_stdby_e_parallel(tcg_env, addr, val);
3150         } else {
3151             gen_helper_stdby_e(tcg_env, addr, val);
3152         }
3153     } else {
3154         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3155             gen_helper_stdby_b_parallel(tcg_env, addr, val);
3156         } else {
3157             gen_helper_stdby_b(tcg_env, addr, val);
3158         }
3159     }
3160     if (a->m) {
3161         tcg_gen_andi_i64(ofs, ofs, ~7);
3162         save_gpr(ctx, a->b, ofs);
3163     }
3164 
3165     return nullify_end(ctx);
3166 }
3167 
3168 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3169 {
3170     int hold_mmu_idx = ctx->mmu_idx;
3171 
3172     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3173     ctx->mmu_idx = MMU_PHYS_IDX;
3174     trans_ld(ctx, a);
3175     ctx->mmu_idx = hold_mmu_idx;
3176     return true;
3177 }
3178 
3179 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3180 {
3181     int hold_mmu_idx = ctx->mmu_idx;
3182 
3183     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3184     ctx->mmu_idx = MMU_PHYS_IDX;
3185     trans_st(ctx, a);
3186     ctx->mmu_idx = hold_mmu_idx;
3187     return true;
3188 }
3189 
3190 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3191 {
3192     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3193 
3194     tcg_gen_movi_i64(tcg_rt, a->i);
3195     save_gpr(ctx, a->t, tcg_rt);
3196     cond_free(&ctx->null_cond);
3197     return true;
3198 }
3199 
3200 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3201 {
3202     TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3203     TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3204 
3205     tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3206     save_gpr(ctx, 1, tcg_r1);
3207     cond_free(&ctx->null_cond);
3208     return true;
3209 }
3210 
3211 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3212 {
3213     TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3214 
3215     /* Special case rb == 0 for the LDI pseudo-op.
3216        The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3217     if (a->b == 0) {
3218         tcg_gen_movi_i64(tcg_rt, a->i);
3219     } else {
3220         tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3221     }
3222     save_gpr(ctx, a->t, tcg_rt);
3223     cond_free(&ctx->null_cond);
3224     return true;
3225 }
3226 
3227 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3228                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3229 {
3230     TCGv_i64 dest, in2, sv;
3231     DisasCond cond;
3232 
3233     in2 = load_gpr(ctx, r);
3234     dest = tcg_temp_new_i64();
3235 
3236     tcg_gen_sub_i64(dest, in1, in2);
3237 
3238     sv = NULL;
3239     if (cond_need_sv(c)) {
3240         sv = do_sub_sv(ctx, dest, in1, in2);
3241     }
3242 
3243     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3244     return do_cbranch(ctx, disp, n, &cond);
3245 }
3246 
3247 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3248 {
3249     if (!ctx->is_pa20 && a->d) {
3250         return false;
3251     }
3252     nullify_over(ctx);
3253     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3254                    a->c, a->f, a->d, a->n, a->disp);
3255 }
3256 
3257 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3258 {
3259     if (!ctx->is_pa20 && a->d) {
3260         return false;
3261     }
3262     nullify_over(ctx);
3263     return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3264                    a->c, a->f, a->d, a->n, a->disp);
3265 }
3266 
3267 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3268                     unsigned c, unsigned f, unsigned n, int disp)
3269 {
3270     TCGv_i64 dest, in2, sv, cb_cond;
3271     DisasCond cond;
3272     bool d = false;
3273 
3274     /*
3275      * For hppa64, the ADDB conditions change with PSW.W,
3276      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3277      */
3278     if (ctx->tb_flags & PSW_W) {
3279         d = c >= 5;
3280         if (d) {
3281             c &= 3;
3282         }
3283     }
3284 
3285     in2 = load_gpr(ctx, r);
3286     dest = tcg_temp_new_i64();
3287     sv = NULL;
3288     cb_cond = NULL;
3289 
3290     if (cond_need_cb(c)) {
3291         TCGv_i64 cb = tcg_temp_new_i64();
3292         TCGv_i64 cb_msb = tcg_temp_new_i64();
3293 
3294         tcg_gen_movi_i64(cb_msb, 0);
3295         tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3296         tcg_gen_xor_i64(cb, in1, in2);
3297         tcg_gen_xor_i64(cb, cb, dest);
3298         cb_cond = get_carry(ctx, d, cb, cb_msb);
3299     } else {
3300         tcg_gen_add_i64(dest, in1, in2);
3301     }
3302     if (cond_need_sv(c)) {
3303         sv = do_add_sv(ctx, dest, in1, in2);
3304     }
3305 
3306     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3307     save_gpr(ctx, r, dest);
3308     return do_cbranch(ctx, disp, n, &cond);
3309 }
3310 
3311 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3312 {
3313     nullify_over(ctx);
3314     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3315 }
3316 
3317 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3318 {
3319     nullify_over(ctx);
3320     return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3321 }
3322 
3323 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3324 {
3325     TCGv_i64 tmp, tcg_r;
3326     DisasCond cond;
3327 
3328     nullify_over(ctx);
3329 
3330     tmp = tcg_temp_new_i64();
3331     tcg_r = load_gpr(ctx, a->r);
3332     if (cond_need_ext(ctx, a->d)) {
3333         /* Force shift into [32,63] */
3334         tcg_gen_ori_i64(tmp, cpu_sar, 32);
3335         tcg_gen_shl_i64(tmp, tcg_r, tmp);
3336     } else {
3337         tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3338     }
3339 
3340     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3341     return do_cbranch(ctx, a->disp, a->n, &cond);
3342 }
3343 
3344 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3345 {
3346     TCGv_i64 tmp, tcg_r;
3347     DisasCond cond;
3348     int p;
3349 
3350     nullify_over(ctx);
3351 
3352     tmp = tcg_temp_new_i64();
3353     tcg_r = load_gpr(ctx, a->r);
3354     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3355     tcg_gen_shli_i64(tmp, tcg_r, p);
3356 
3357     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3358     return do_cbranch(ctx, a->disp, a->n, &cond);
3359 }
3360 
3361 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3362 {
3363     TCGv_i64 dest;
3364     DisasCond cond;
3365 
3366     nullify_over(ctx);
3367 
3368     dest = dest_gpr(ctx, a->r2);
3369     if (a->r1 == 0) {
3370         tcg_gen_movi_i64(dest, 0);
3371     } else {
3372         tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3373     }
3374 
3375     /* All MOVB conditions are 32-bit. */
3376     cond = do_sed_cond(ctx, a->c, false, dest);
3377     return do_cbranch(ctx, a->disp, a->n, &cond);
3378 }
3379 
3380 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3381 {
3382     TCGv_i64 dest;
3383     DisasCond cond;
3384 
3385     nullify_over(ctx);
3386 
3387     dest = dest_gpr(ctx, a->r);
3388     tcg_gen_movi_i64(dest, a->i);
3389 
3390     /* All MOVBI conditions are 32-bit. */
3391     cond = do_sed_cond(ctx, a->c, false, dest);
3392     return do_cbranch(ctx, a->disp, a->n, &cond);
3393 }
3394 
3395 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3396 {
3397     TCGv_i64 dest, src2;
3398 
3399     if (!ctx->is_pa20 && a->d) {
3400         return false;
3401     }
3402     if (a->c) {
3403         nullify_over(ctx);
3404     }
3405 
3406     dest = dest_gpr(ctx, a->t);
3407     src2 = load_gpr(ctx, a->r2);
3408     if (a->r1 == 0) {
3409         if (a->d) {
3410             tcg_gen_shr_i64(dest, src2, cpu_sar);
3411         } else {
3412             TCGv_i64 tmp = tcg_temp_new_i64();
3413 
3414             tcg_gen_ext32u_i64(dest, src2);
3415             tcg_gen_andi_i64(tmp, cpu_sar, 31);
3416             tcg_gen_shr_i64(dest, dest, tmp);
3417         }
3418     } else if (a->r1 == a->r2) {
3419         if (a->d) {
3420             tcg_gen_rotr_i64(dest, src2, cpu_sar);
3421         } else {
3422             TCGv_i32 t32 = tcg_temp_new_i32();
3423             TCGv_i32 s32 = tcg_temp_new_i32();
3424 
3425             tcg_gen_extrl_i64_i32(t32, src2);
3426             tcg_gen_extrl_i64_i32(s32, cpu_sar);
3427             tcg_gen_andi_i32(s32, s32, 31);
3428             tcg_gen_rotr_i32(t32, t32, s32);
3429             tcg_gen_extu_i32_i64(dest, t32);
3430         }
3431     } else {
3432         TCGv_i64 src1 = load_gpr(ctx, a->r1);
3433 
3434         if (a->d) {
3435             TCGv_i64 t = tcg_temp_new_i64();
3436             TCGv_i64 n = tcg_temp_new_i64();
3437 
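                 /* Compute src2 << (64 - sar) as two shifts, since a TCG
                    shift count must stay within [0, 63]; for sar == 0 the
                    pair of shifts correctly yields zero.  */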
3438             tcg_gen_xori_i64(n, cpu_sar, 63);
3439             tcg_gen_shl_i64(t, src2, n);
3440             tcg_gen_shli_i64(t, t, 1);
3441             tcg_gen_shr_i64(dest, src1, cpu_sar);
3442             tcg_gen_or_i64(dest, dest, t);
3443         } else {
3444             TCGv_i64 t = tcg_temp_new_i64();
3445             TCGv_i64 s = tcg_temp_new_i64();
3446 
3447             tcg_gen_concat32_i64(t, src2, src1);
3448             tcg_gen_andi_i64(s, cpu_sar, 31);
3449             tcg_gen_shr_i64(dest, t, s);
3450         }
3451     }
3452     save_gpr(ctx, a->t, dest);
3453 
3454     /* Install the new nullification.  */
3455     cond_free(&ctx->null_cond);
3456     if (a->c) {
3457         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3458     }
3459     return nullify_end(ctx);
3460 }
3461 
3462 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3463 {
3464     unsigned width, sa;
3465     TCGv_i64 dest, t2;
3466 
3467     if (!ctx->is_pa20 && a->d) {
3468         return false;
3469     }
3470     if (a->c) {
3471         nullify_over(ctx);
3472     }
3473 
3474     width = a->d ? 64 : 32;
3475     sa = width - 1 - a->cpos;
3476 
3477     dest = dest_gpr(ctx, a->t);
3478     t2 = load_gpr(ctx, a->r2);
3479     if (a->r1 == 0) {
3480         tcg_gen_extract_i64(dest, t2, sa, width - sa);
3481     } else if (width == TARGET_LONG_BITS) {
3482         tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3483     } else {
3484         assert(!a->d);
3485         if (a->r1 == a->r2) {
3486             TCGv_i32 t32 = tcg_temp_new_i32();
3487             tcg_gen_extrl_i64_i32(t32, t2);
3488             tcg_gen_rotri_i32(t32, t32, sa);
3489             tcg_gen_extu_i32_i64(dest, t32);
3490         } else {
3491             tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3492             tcg_gen_extract_i64(dest, dest, sa, 32);
3493         }
3494     }
3495     save_gpr(ctx, a->t, dest);
3496 
3497     /* Install the new nullification.  */
3498     cond_free(&ctx->null_cond);
3499     if (a->c) {
3500         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3501     }
3502     return nullify_end(ctx);
3503 }
3504 
3505 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3506 {
3507     unsigned widthm1 = a->d ? 63 : 31;
3508     TCGv_i64 dest, src, tmp;
3509 
3510     if (!ctx->is_pa20 && a->d) {
3511         return false;
3512     }
3513     if (a->c) {
3514         nullify_over(ctx);
3515     }
3516 
3517     dest = dest_gpr(ctx, a->t);
3518     src = load_gpr(ctx, a->r);
3519     tmp = tcg_temp_new_i64();
3520 
3521     /* Recall that SAR is using big-endian bit numbering.  */
3522     tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3523     tcg_gen_xori_i64(tmp, tmp, widthm1);
3524 
3525     if (a->se) {
3526         if (!a->d) {
3527             tcg_gen_ext32s_i64(dest, src);
3528             src = dest;
3529         }
3530         tcg_gen_sar_i64(dest, src, tmp);
3531         tcg_gen_sextract_i64(dest, dest, 0, a->len);
3532     } else {
3533         if (!a->d) {
3534             tcg_gen_ext32u_i64(dest, src);
3535             src = dest;
3536         }
3537         tcg_gen_shr_i64(dest, src, tmp);
3538         tcg_gen_extract_i64(dest, dest, 0, a->len);
3539     }
3540     save_gpr(ctx, a->t, dest);
3541 
3542     /* Install the new nullification.  */
3543     cond_free(&ctx->null_cond);
3544     if (a->c) {
3545         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3546     }
3547     return nullify_end(ctx);
3548 }
3549 
3550 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3551 {
3552     unsigned len, cpos, width;
3553     TCGv_i64 dest, src;
3554 
3555     if (!ctx->is_pa20 && a->d) {
3556         return false;
3557     }
3558     if (a->c) {
3559         nullify_over(ctx);
3560     }
3561 
3562     len = a->len;
3563     width = a->d ? 64 : 32;
3564     cpos = width - 1 - a->pos;
3565     if (cpos + len > width) {
3566         len = width - cpos;
3567     }
3568 
3569     dest = dest_gpr(ctx, a->t);
3570     src = load_gpr(ctx, a->r);
3571     if (a->se) {
3572         tcg_gen_sextract_i64(dest, src, cpos, len);
3573     } else {
3574         tcg_gen_extract_i64(dest, src, cpos, len);
3575     }
3576     save_gpr(ctx, a->t, dest);
3577 
3578     /* Install the new nullification.  */
3579     cond_free(&ctx->null_cond);
3580     if (a->c) {
3581         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3582     }
3583     return nullify_end(ctx);
3584 }
3585 
3586 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3587 {
3588     unsigned len, width;
3589     uint64_t mask0, mask1;
3590     TCGv_i64 dest;
3591 
3592     if (!ctx->is_pa20 && a->d) {
3593         return false;
3594     }
3595     if (a->c) {
3596         nullify_over(ctx);
3597     }
3598 
3599     len = a->len;
3600     width = a->d ? 64 : 32;
3601     if (a->cpos + len > width) {
3602         len = width - a->cpos;
3603     }
3604 
3605     dest = dest_gpr(ctx, a->t);
3606     mask0 = deposit64(0, a->cpos, len, a->i);
3607     mask1 = deposit64(-1, a->cpos, len, a->i);
3608 
3609     if (a->nz) {
3610         TCGv_i64 src = load_gpr(ctx, a->t);
3611         tcg_gen_andi_i64(dest, src, mask1);
3612         tcg_gen_ori_i64(dest, dest, mask0);
3613     } else {
3614         tcg_gen_movi_i64(dest, mask0);
3615     }
3616     save_gpr(ctx, a->t, dest);
3617 
3618     /* Install the new nullification.  */
3619     cond_free(&ctx->null_cond);
3620     if (a->c) {
3621         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3622     }
3623     return nullify_end(ctx);
3624 }
3625 
3626 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3627 {
3628     unsigned rs = a->nz ? a->t : 0;
3629     unsigned len, width;
3630     TCGv_i64 dest, val;
3631 
3632     if (!ctx->is_pa20 && a->d) {
3633         return false;
3634     }
3635     if (a->c) {
3636         nullify_over(ctx);
3637     }
3638 
3639     len = a->len;
3640     width = a->d ? 64 : 32;
3641     if (a->cpos + len > width) {
3642         len = width - a->cpos;
3643     }
3644 
3645     dest = dest_gpr(ctx, a->t);
3646     val = load_gpr(ctx, a->r);
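         /* rs is zero either for the deposit-into-zero form (!a->nz) or
            when the merge background is GR0, which always reads as zero;
            both cases reduce to deposit_z. */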
3647     if (rs == 0) {
3648         tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3649     } else {
3650         tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3651     }
3652     save_gpr(ctx, a->t, dest);
3653 
3654     /* Install the new nullification.  */
3655     cond_free(&ctx->null_cond);
3656     if (a->c) {
3657         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3658     }
3659     return nullify_end(ctx);
3660 }
3661 
3662 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3663                        bool d, bool nz, unsigned len, TCGv_i64 val)
3664 {
3665     unsigned rs = nz ? rt : 0;
3666     unsigned widthm1 = d ? 63 : 31;
3667     TCGv_i64 mask, tmp, shift, dest;
3668     uint64_t msb = 1ULL << (len - 1);
3669 
3670     dest = dest_gpr(ctx, rt);
3671     shift = tcg_temp_new_i64();
3672     tmp = tcg_temp_new_i64();
3673 
3674     /* Convert big-endian bit numbering in SAR to left-shift.  */
3675     tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3676     tcg_gen_xori_i64(shift, shift, widthm1);
3677 
3678     mask = tcg_temp_new_i64();
3679     tcg_gen_movi_i64(mask, msb + (msb - 1));
3680     tcg_gen_and_i64(tmp, val, mask);
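         /* msb + (msb - 1) is simply a mask of the low 'len' bits, e.g.
            len == 4 -> 0x8 + 0x7 == 0xf.  Shifting both the mask and the
            masked value left by the converted SAR amount places the field
            at a variable position; for the merge form below, andc clears
            the field in the old value before the new bits are or'ed in. */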
3681     if (rs) {
3682         tcg_gen_shl_i64(mask, mask, shift);
3683         tcg_gen_shl_i64(tmp, tmp, shift);
3684         tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3685         tcg_gen_or_i64(dest, dest, tmp);
3686     } else {
3687         tcg_gen_shl_i64(dest, tmp, shift);
3688     }
3689     save_gpr(ctx, rt, dest);
3690 
3691     /* Install the new nullification.  */
3692     cond_free(&ctx->null_cond);
3693     if (c) {
3694         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3695     }
3696     return nullify_end(ctx);
3697 }
3698 
3699 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3700 {
3701     if (!ctx->is_pa20 && a->d) {
3702         return false;
3703     }
3704     if (a->c) {
3705         nullify_over(ctx);
3706     }
3707     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3708                       load_gpr(ctx, a->r));
3709 }
3710 
3711 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3712 {
3713     if (!ctx->is_pa20 && a->d) {
3714         return false;
3715     }
3716     if (a->c) {
3717         nullify_over(ctx);
3718     }
3719     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3720                       tcg_constant_i64(a->i));
3721 }
3722 
3723 static bool trans_be(DisasContext *ctx, arg_be *a)
3724 {
3725     TCGv_i64 tmp;
3726 
3727 #ifdef CONFIG_USER_ONLY
3728     /* ??? It seems like there should be a good way of using
3729        "be disp(sr2, r0)", the canonical gateway entry mechanism
3730        to our advantage.  But that appears to be inconvenient to
3731        manage alongside branch delay slots.  Therefore we handle
3732        entry into the gateway page via absolute address.  */
3733     /* Since we don't implement spaces, just branch.  Do notice the special
3734        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3735        goto_tb to the TB containing the syscall.  */
3736     if (a->b == 0) {
3737         return do_dbranch(ctx, a->disp, a->l, a->n);
3738     }
3739 #else
3740     nullify_over(ctx);
3741 #endif
3742 
3743     tmp = tcg_temp_new_i64();
3744     tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3745     tmp = do_ibranch_priv(ctx, tmp);
3746 
3747 #ifdef CONFIG_USER_ONLY
3748     return do_ibranch(ctx, tmp, a->l, a->n);
3749 #else
3750     TCGv_i64 new_spc = tcg_temp_new_i64();
3751 
3752     load_spr(ctx, new_spc, a->sp);
3753     if (a->l) {
3754         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3755         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3756     }
3757     if (a->n && use_nullify_skip(ctx)) {
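             /* The delay slot is nullified, so skip it: install the
                branch target directly into the front of the queue and
                target + 4 into the back. */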
3758         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3759         tcg_gen_addi_i64(tmp, tmp, 4);
3760         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3761         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3762         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3763     } else {
3764         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3765         if (ctx->iaoq_b == -1) {
3766             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3767         }
3768         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3769         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3770         nullify_set(ctx, a->n);
3771     }
3772     tcg_gen_lookup_and_goto_ptr();
3773     ctx->base.is_jmp = DISAS_NORETURN;
3774     return nullify_end(ctx);
3775 #endif
3776 }
3777 
3778 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3779 {
3780     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3781 }
3782 
3783 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3784 {
3785     uint64_t dest = iaoq_dest(ctx, a->disp);
3786 
3787     nullify_over(ctx);
3788 
3789     /* Make sure the caller hasn't done something weird with the queue.
3790      * ??? This is not quite the same as the PSW[B] bit, which would be
3791      * expensive to track.  Real hardware will trap for
3792      *    b  gateway
3793      *    b  gateway+4  (in delay slot of first branch)
3794      * However, checking for a non-sequential instruction queue *will*
3795      * diagnose the security hole
3796      *    b  gateway
3797      *    b  evil
3798      * in which instructions at evil would run with increased privs.
3799      */
3800     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3801         return gen_illegal(ctx);
3802     }
3803 
3804 #ifndef CONFIG_USER_ONLY
3805     if (ctx->tb_flags & PSW_C) {
3806         CPUHPPAState *env = cpu_env(ctx->cs);
3807         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3808         /* If we could not find a TLB entry, then we need to generate an
3809            ITLB miss exception so the kernel will provide it.
3810            The resulting TLB fill operation will invalidate this TB and
3811            we will re-translate, at which point we *will* be able to find
3812            the TLB entry and determine if this is in fact a gateway page.  */
3813         if (type < 0) {
3814             gen_excp(ctx, EXCP_ITLB_MISS);
3815             return true;
3816         }
3817         /* No change for non-gateway pages or for priv decrease.  */
3818         if (type >= 4 && type - 4 < ctx->privilege) {
                 /* dest is a 64-bit address: use deposit64 so that the
                    upper address bits survive in wide mode. */
3819             dest = deposit64(dest, 0, 2, type - 4);
3820         }
3821     } else {
3822         dest &= -4;  /* priv = 0 */
3823     }
3824 #endif
3825 
3826     if (a->l) {
3827         TCGv_i64 tmp = dest_gpr(ctx, a->l);
3828         if (ctx->privilege < 3) {
3829             tcg_gen_andi_i64(tmp, tmp, -4);
3830         }
3831         tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3832         save_gpr(ctx, a->l, tmp);
3833     }
3834 
3835     return do_dbranch(ctx, dest, 0, a->n);
3836 }
3837 
3838 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3839 {
3840     if (a->x) {
3841         TCGv_i64 tmp = tcg_temp_new_i64();
3842         tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3843         tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
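             /* The target is thus IAOQ_F + 8 + GR[x] * 8; in effect BLR
                indexes a table of eight-byte (two-instruction) entries
                following the branch and its delay slot. */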
3844         /* The computation here never changes privilege level.  */
3845         return do_ibranch(ctx, tmp, a->l, a->n);
3846     } else {
3847         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3848         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3849     }
3850 }
3851 
3852 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3853 {
3854     TCGv_i64 dest;
3855 
3856     if (a->x == 0) {
3857         dest = load_gpr(ctx, a->b);
3858     } else {
3859         dest = tcg_temp_new_i64();
3860         tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3861         tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3862     }
3863     dest = do_ibranch_priv(ctx, dest);
3864     return do_ibranch(ctx, dest, 0, a->n);
3865 }
3866 
3867 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3868 {
3869     TCGv_i64 dest;
3870 
3871 #ifdef CONFIG_USER_ONLY
3872     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3873     return do_ibranch(ctx, dest, a->l, a->n);
3874 #else
3875     nullify_over(ctx);
3876     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3877 
3878     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3879     if (ctx->iaoq_b == -1) {
3880         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3881     }
3882     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3883     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3884     if (a->l) {
3885         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3886     }
3887     nullify_set(ctx, a->n);
3888     tcg_gen_lookup_and_goto_ptr();
3889     ctx->base.is_jmp = DISAS_NORETURN;
3890     return nullify_end(ctx);
3891 #endif
3892 }
3893 
3894 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3895 {
3896     /* All branch target stack instructions are implemented as nops. */
3897     return ctx->is_pa20;
3898 }
3899 
3900 /*
3901  * Float class 0
3902  */
3903 
3904 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3905 {
3906     tcg_gen_mov_i32(dst, src);
3907 }
3908 
3909 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3910 {
3911     uint64_t ret;
3912 
3913     if (ctx->is_pa20) {
3914         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3915     } else {
3916         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3917     }
3918 
3919     nullify_over(ctx);
3920     save_frd(0, tcg_constant_i64(ret));
3921     return nullify_end(ctx);
3922 }
3923 
3924 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3925 {
3926     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3927 }
3928 
3929 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3930 {
3931     tcg_gen_mov_i64(dst, src);
3932 }
3933 
3934 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3935 {
3936     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3937 }
3938 
3939 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3940 {
3941     tcg_gen_andi_i32(dst, src, INT32_MAX);
3942 }
3943 
3944 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3945 {
3946     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3947 }
3948 
3949 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3950 {
3951     tcg_gen_andi_i64(dst, src, INT64_MAX);
3952 }
3953 
3954 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3955 {
3956     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3957 }
3958 
3959 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3960 {
3961     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3962 }
3963 
3964 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3965 {
3966     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3967 }
3968 
3969 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3970 {
3971     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3972 }
3973 
3974 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3975 {
3976     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3977 }
3978 
3979 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3980 {
3981     tcg_gen_xori_i32(dst, src, INT32_MIN);
3982 }
3983 
3984 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3985 {
3986     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3987 }
3988 
3989 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3990 {
3991     tcg_gen_xori_i64(dst, src, INT64_MIN);
3992 }
3993 
3994 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3995 {
3996     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3997 }
3998 
3999 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4000 {
4001     tcg_gen_ori_i32(dst, src, INT32_MIN);
4002 }
4003 
4004 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4005 {
4006     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4007 }
4008 
4009 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4010 {
4011     tcg_gen_ori_i64(dst, src, INT64_MIN);
4012 }
4013 
4014 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4015 {
4016     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4017 }
4018 
4019 /*
4020  * Float class 1
4021  */
4022 
4023 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4024 {
4025     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4026 }
4027 
4028 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4029 {
4030     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4031 }
4032 
4033 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4034 {
4035     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4036 }
4037 
4038 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4039 {
4040     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4041 }
4042 
4043 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4044 {
4045     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4046 }
4047 
4048 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4049 {
4050     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4051 }
4052 
4053 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4054 {
4055     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4056 }
4057 
4058 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4059 {
4060     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4061 }
4062 
4063 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4064 {
4065     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4066 }
4067 
4068 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4069 {
4070     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4071 }
4072 
4073 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4074 {
4075     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4076 }
4077 
4078 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4079 {
4080     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4081 }
4082 
4083 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4084 {
4085     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4086 }
4087 
4088 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4089 {
4090     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4091 }
4092 
4093 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4094 {
4095     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4096 }
4097 
4098 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4099 {
4100     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4101 }
4102 
4103 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4104 {
4105     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4106 }
4107 
4108 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4109 {
4110     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4111 }
4112 
4113 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4114 {
4115     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4116 }
4117 
4118 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4119 {
4120     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4121 }
4122 
4123 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4124 {
4125     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4126 }
4127 
4128 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4129 {
4130     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4131 }
4132 
4133 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4134 {
4135     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4136 }
4137 
4138 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4139 {
4140     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4141 }
4142 
4143 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4144 {
4145     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4146 }
4147 
4148 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4149 {
4150     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4151 }
4152 
4153 /*
4154  * Float class 2
4155  */
4156 
4157 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4158 {
4159     TCGv_i32 ta, tb, tc, ty;
4160 
4161     nullify_over(ctx);
4162 
4163     ta = load_frw0_i32(a->r1);
4164     tb = load_frw0_i32(a->r2);
4165     ty = tcg_constant_i32(a->y);
4166     tc = tcg_constant_i32(a->c);
4167 
4168     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4169 
4170     return nullify_end(ctx);
4171 }
4172 
4173 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4174 {
4175     TCGv_i64 ta, tb;
4176     TCGv_i32 tc, ty;
4177 
4178     nullify_over(ctx);
4179 
4180     ta = load_frd0(a->r1);
4181     tb = load_frd0(a->r2);
4182     ty = tcg_constant_i32(a->y);
4183     tc = tcg_constant_i32(a->c);
4184 
4185     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4186 
4187     return nullify_end(ctx);
4188 }
4189 
4190 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4191 {
4192     TCGv_i64 t;
4193 
4194     nullify_over(ctx);
4195 
4196     t = tcg_temp_new_i64();
4197     tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
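         /* fr0_shadow mirrors the FPSR.  In the masks below, 0x4000000
            is bit 26, the compare (C) bit, and the acc/rej variants add
            1, 3, 5, 7 or 11 of the compare-queue bits at positions 21
            down to 11. */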
4198 
4199     if (a->y == 1) {
4200         int mask;
4201         bool inv = false;
4202 
4203         switch (a->c) {
4204         case 0: /* simple */
4205             tcg_gen_andi_i64(t, t, 0x4000000);
4206             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4207             goto done;
4208         case 2: /* rej */
4209             inv = true;
4210             /* fallthru */
4211         case 1: /* acc */
4212             mask = 0x43ff800;
4213             break;
4214         case 6: /* rej8 */
4215             inv = true;
4216             /* fallthru */
4217         case 5: /* acc8 */
4218             mask = 0x43f8000;
4219             break;
4220         case 9: /* acc6 */
4221             mask = 0x43e0000;
4222             break;
4223         case 13: /* acc4 */
4224             mask = 0x4380000;
4225             break;
4226         case 17: /* acc2 */
4227             mask = 0x4200000;
4228             break;
4229         default:
4230             gen_illegal(ctx);
4231             return true;
4232         }
4233         if (inv) {
4234             TCGv_i64 c = tcg_constant_i64(mask);
4235             tcg_gen_or_i64(t, t, c);
4236             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4237         } else {
4238             tcg_gen_andi_i64(t, t, mask);
4239             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4240         }
4241     } else {
4242         unsigned cbit = (a->y ^ 1) - 1;
4243 
4244         tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4245         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4246     }
4247 
4248  done:
4249     return nullify_end(ctx);
4250 }
4251 
4252 /*
4253  * Float class 3
4254  */
4255 
4256 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4257 {
4258     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4259 }
4260 
4261 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4262 {
4263     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4264 }
4265 
4266 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4267 {
4268     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4269 }
4270 
4271 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4272 {
4273     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4274 }
4275 
4276 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4277 {
4278     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4279 }
4280 
4281 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4282 {
4283     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4284 }
4285 
4286 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4287 {
4288     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4289 }
4290 
4291 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4292 {
4293     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4294 }
4295 
4296 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4297 {
4298     TCGv_i64 x, y;
4299 
4300     nullify_over(ctx);
4301 
4302     x = load_frw0_i64(a->r1);
4303     y = load_frw0_i64(a->r2);
4304     tcg_gen_mul_i64(x, x, y);
4305     save_frd(a->t, x);
4306 
4307     return nullify_end(ctx);
4308 }
4309 
4310 /* Convert the fmpyadd single-precision register encodings to standard.  */
4311 static inline int fmpyadd_s_reg(unsigned r)
4312 {
4313     return (r & 16) * 2 + 16 + (r & 15);
4314 }
4315 
4316 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4317 {
4318     int tm = fmpyadd_s_reg(a->tm);
4319     int ra = fmpyadd_s_reg(a->ra);
4320     int ta = fmpyadd_s_reg(a->ta);
4321     int rm2 = fmpyadd_s_reg(a->rm2);
4322     int rm1 = fmpyadd_s_reg(a->rm1);
4323 
4324     nullify_over(ctx);
4325 
4326     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4327     do_fop_weww(ctx, ta, ta, ra,
4328                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4329 
4330     return nullify_end(ctx);
4331 }
4332 
4333 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4334 {
4335     return do_fmpyadd_s(ctx, a, false);
4336 }
4337 
4338 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4339 {
4340     return do_fmpyadd_s(ctx, a, true);
4341 }
4342 
4343 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4344 {
4345     nullify_over(ctx);
4346 
4347     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4348     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4349                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4350 
4351     return nullify_end(ctx);
4352 }
4353 
4354 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4355 {
4356     return do_fmpyadd_d(ctx, a, false);
4357 }
4358 
4359 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4360 {
4361     return do_fmpyadd_d(ctx, a, true);
4362 }
4363 
4364 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4365 {
4366     TCGv_i32 x, y, z;
4367 
4368     nullify_over(ctx);
4369     x = load_frw0_i32(a->rm1);
4370     y = load_frw0_i32(a->rm2);
4371     z = load_frw0_i32(a->ra3);
4372 
4373     if (a->neg) {
4374         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4375     } else {
4376         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4377     }
4378 
4379     save_frw_i32(a->t, x);
4380     return nullify_end(ctx);
4381 }
4382 
4383 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4384 {
4385     TCGv_i64 x, y, z;
4386 
4387     nullify_over(ctx);
4388     x = load_frd0(a->rm1);
4389     y = load_frd0(a->rm2);
4390     z = load_frd0(a->ra3);
4391 
4392     if (a->neg) {
4393         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4394     } else {
4395         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4396     }
4397 
4398     save_frd(a->t, x);
4399     return nullify_end(ctx);
4400 }
4401 
4402 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4403 {
4404     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4405 #ifndef CONFIG_USER_ONLY
4406     if (a->i == 0x100) {
4407         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4408         nullify_over(ctx);
4409         gen_helper_diag_btlb(tcg_env);
4410         return nullify_end(ctx);
4411     }
4412 #endif
4413     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4414     return true;
4415 }
4416 
4417 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4418 {
4419     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4420     int bound;
4421 
4422     ctx->cs = cs;
4423     ctx->tb_flags = ctx->base.tb->flags;
4424     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4425 
4426 #ifdef CONFIG_USER_ONLY
4427     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4428     ctx->mmu_idx = MMU_USER_IDX;
4429     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4430     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4431     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4432 #else
4433     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4434     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4435                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4436                     : MMU_PHYS_IDX);
4437 
4438     /* Recover the IAOQ values from the GVA + PRIV.  */
4439     uint64_t cs_base = ctx->base.tb->cs_base;
4440     uint64_t iasq_f = cs_base & ~0xffffffffull;
4441     int32_t diff = cs_base;
4442 
4443     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4444     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
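         /* cs_base carries the space (IASQ_F) bits in its high half and
            the signed IAOQ_B - IAOQ_F offset in its low 32 bits; a zero
            offset means the back of the queue is not known statically. */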
4445 #endif
4446     ctx->iaoq_n = -1;
4447     ctx->iaoq_n_var = NULL;
4448 
4449     ctx->zero = tcg_constant_i64(0);
4450 
4451     /* Bound the number of instructions by those left on the page.  */
4452     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
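         /* -(pc | TARGET_PAGE_MASK) is the byte count from pc to the end
            of its page; e.g. with 4 KiB pages and pc 8 bytes below a
            page boundary, bound == 2. */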
4453     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4454 }
4455 
4456 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4457 {
4458     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4459 
4460     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4461     ctx->null_cond = cond_make_f();
4462     ctx->psw_n_nonzero = false;
4463     if (ctx->tb_flags & PSW_N) {
4464         ctx->null_cond.c = TCG_COND_ALWAYS;
4465         ctx->psw_n_nonzero = true;
4466     }
4467     ctx->null_lab = NULL;
4468 }
4469 
4470 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4471 {
4472     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4473 
4474     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
4475     ctx->insn_start = tcg_last_op();
4476 }
4477 
4478 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4479 {
4480     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4481     CPUHPPAState *env = cpu_env(cs);
4482     DisasJumpType ret;
4483 
4484     /* Execute one insn.  */
4485 #ifdef CONFIG_USER_ONLY
4486     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4487         do_page_zero(ctx);
4488         ret = ctx->base.is_jmp;
4489         assert(ret != DISAS_NEXT);
4490     } else
4491 #endif
4492     {
4493         /* Always fetch the insn, even if nullified, so that we check
4494            the page permissions for execute.  */
4495         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4496 
4497         /* Set up the IA queue for the next insn.
4498            This will be overwritten by a branch.  */
4499         if (ctx->iaoq_b == -1) {
4500             ctx->iaoq_n = -1;
4501             ctx->iaoq_n_var = tcg_temp_new_i64();
4502             tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
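                 /* The back of the queue is not known at translate time,
                    e.g. following an indirect branch, so the next entry
                    must be computed dynamically from cpu_iaoq_b. */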
4503         } else {
4504             ctx->iaoq_n = ctx->iaoq_b + 4;
4505             ctx->iaoq_n_var = NULL;
4506         }
4507 
4508         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4509             ctx->null_cond.c = TCG_COND_NEVER;
4510             ret = DISAS_NEXT;
4511         } else {
4512             ctx->insn = insn;
4513             if (!decode(ctx, insn)) {
4514                 gen_illegal(ctx);
4515             }
4516             ret = ctx->base.is_jmp;
4517             assert(ctx->null_lab == NULL);
4518         }
4519     }
4520 
4521     /* Advance the insn queue.  Note that this check also detects
4522        a privilege level change within the instruction queue.  */
4523     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4524         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4525             && use_goto_tb(ctx, ctx->iaoq_b)
4526             && (ctx->null_cond.c == TCG_COND_NEVER
4527                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4528             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4529             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4530             ctx->base.is_jmp = ret = DISAS_NORETURN;
4531         } else {
4532             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4533         }
4534     }
4535     ctx->iaoq_f = ctx->iaoq_b;
4536     ctx->iaoq_b = ctx->iaoq_n;
4537     ctx->base.pc_next += 4;
4538 
4539     switch (ret) {
4540     case DISAS_NORETURN:
4541     case DISAS_IAQ_N_UPDATED:
4542         break;
4543 
4544     case DISAS_NEXT:
4545     case DISAS_IAQ_N_STALE:
4546     case DISAS_IAQ_N_STALE_EXIT:
4547         if (ctx->iaoq_f == -1) {
4548             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4549             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4550 #ifndef CONFIG_USER_ONLY
4551             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4552 #endif
4553             nullify_save(ctx);
4554             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4555                                 ? DISAS_EXIT
4556                                 : DISAS_IAQ_N_UPDATED);
4557         } else if (ctx->iaoq_b == -1) {
4558             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4559         }
4560         break;
4561 
4562     default:
4563         g_assert_not_reached();
4564     }
4565 }
4566 
4567 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4568 {
4569     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4570     DisasJumpType is_jmp = ctx->base.is_jmp;
4571 
4572     switch (is_jmp) {
4573     case DISAS_NORETURN:
4574         break;
4575     case DISAS_TOO_MANY:
4576     case DISAS_IAQ_N_STALE:
4577     case DISAS_IAQ_N_STALE_EXIT:
4578         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4579         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4580         nullify_save(ctx);
4581         /* FALLTHRU */
4582     case DISAS_IAQ_N_UPDATED:
4583         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4584             tcg_gen_lookup_and_goto_ptr();
4585             break;
4586         }
4587         /* FALLTHRU */
4588     case DISAS_EXIT:
4589         tcg_gen_exit_tb(NULL, 0);
4590         break;
4591     default:
4592         g_assert_not_reached();
4593     }
4594 }
4595 
4596 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4597                               CPUState *cs, FILE *logfile)
4598 {
4599     target_ulong pc = dcbase->pc_first;
4600 
4601 #ifdef CONFIG_USER_ONLY
4602     switch (pc) {
4603     case 0x00:
4604         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4605         return;
4606     case 0xb0:
4607         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4608         return;
4609     case 0xe0:
4610         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4611         return;
4612     case 0x100:
4613         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4614         return;
4615     }
4616 #endif
4617 
4618     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4619     target_disas(logfile, cs, pc, dcbase->tb->size);
4620 }
4621 
4622 static const TranslatorOps hppa_tr_ops = {
4623     .init_disas_context = hppa_tr_init_disas_context,
4624     .tb_start           = hppa_tr_tb_start,
4625     .insn_start         = hppa_tr_insn_start,
4626     .translate_insn     = hppa_tr_translate_insn,
4627     .tb_stop            = hppa_tr_tb_stop,
4628     .disas_log          = hppa_tr_disas_log,
4629 };
4630 
4631 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4632                            target_ulong pc, void *host_pc)
4633 {
4634     DisasContext ctx;
4635     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4636 }
4637