/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64

#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr

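/*
 * Note that with this revision TCGv_reg is always TCGv_i64, e.g.
 * tcg_gen_add_reg(d, a, b) expands to tcg_gen_add_i64(d, a, b); the
 * narrower pa1.x semantics are handled dynamically via ctx->is_pa20
 * and cond_need_ext() below rather than by narrower registers.
 */

/*
 * A deferred comparison: operands a0 and a1 compared under TCGCond c.
 * TCG_COND_NEVER and TCG_COND_ALWAYS carry no operands (both NULL).
 */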
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

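    /* Instruction address offset queue: front, back, and computed next.  */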
    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted, so that a decoded value
   of 0 explicitly selects sr0 rather than meaning "infer the space from
   the base register".  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
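/* I.e. (m,a) = (0,x): 0, no base update; (1,0): 1, post-modify;
   (1,1): -1, pre-modify.  */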
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => (y ^ 31) + 1,
     * with the overflow from bit 4 summing with x.
     */
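    /* E.g. x=0,y=1 (val=1) yields 31; x=1,y=0 (val=32) yields 64. */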
    return (val ^ 31) + 1;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

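    /* r0 reads as zero and is never written, so no global is allocated
       for it; load_gpr materialises a fresh zero temporary instead.  */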
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    return cond_make_tmp(c, a0, tcg_constant_reg(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    TCGv_reg t0 = tcg_temp_new();
    TCGv_reg t1 = tcg_temp_new();

    tcg_gen_mov_reg(t0, a0);
    tcg_gen_mov_reg(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

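/* Reset *COND to the empty state (TCG_COND_NEVER), dropping operands.  */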
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

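/* Write T to DEST while respecting any pending nullification: if the
   current insn may be nullified, fold the store into a movcond so that
   DEST keeps its old value on the nullified path.  */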
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

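/* Each 64-bit fr[] element holds two single-precision halves: rt < 32
   names the more significant half and rt >= 32 the less significant,
   with HI_OFS/LO_OFS giving the host-endian byte offsets.  */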
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
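
/*
 * A typical trans_* implementation pairs the two helpers above (sketch):
 *
 *     nullify_over(ctx);              -- branch over the body if nullified
 *     ... emit the operation ...
 *     return nullify_end(ctx);
 */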

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest,
                            uint64_t ival, TCGv_reg vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_reg(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_andi_reg(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

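/* Privilege level 0 is the most privileged; anything else traps.  User-only
   emulation never runs at level 0, so there these insns always trap.  */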
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
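    /*
     * If both queue values are known at translation time and chaining is
     * permitted (use_goto_tb), link straight to the next TB; otherwise
     * flush the queue to the iaoq globals and take the indirect
     * lookup_and_goto_ptr path.
     */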
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* True if the condition is evaluated on 32 bits, so that values need
   explicit sign/zero extension within the 64-bit TCGv_reg.  */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_reg res, TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32u_reg(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_reg(tmp, tmp, 31, 1);
            tcg_gen_and_reg(tmp, tmp, res);
            tcg_gen_ext32u_reg(tmp, tmp);
        } else {
            tcg_gen_sari_reg(tmp, tmp, 63);
            tcg_gen_and_reg(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_reg(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new();
            tcg_gen_ext32s_reg(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res, TCGv_reg in1,
                             TCGv_reg in2, TCGv_reg sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t1 = tcg_temp_new();
        TCGv_reg t2 = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(t1, in1);
            tcg_gen_ext32u_reg(t2, in2);
        } else {
            tcg_gen_ext32s_reg(t1, in1);
            tcg_gen_ext32s_reg(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_reg res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_reg tmp = tcg_temp_new();

        if (ext_uns) {
            tcg_gen_ext32u_reg(tmp, res);
        } else {
            tcg_gen_ext32s_reg(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
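        /* cb = (in1 & in2) | ((in1 | in2) & ~res): the per-bit carry-out
           vector of the addition that produced res.  */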
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
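/* The sign bit of SV is set on overflow: res differs in sign from in1
   while in1 and in2 agree in sign.  */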
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
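/* Here the sign bit of SV is set on overflow: in1 and in2 differ in
   sign, and res differs in sign from in1.  */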
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
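        /* cb = in1 ^ in2 ^ dest is the per-bit carry-in vector; its bit 32
           is the carry out of a 32-bit add (see get_carry).  */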
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf, bool d)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf, bool d,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_tl();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_tl();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
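    /* I.e. spc = sr[4 + top2]: the top two bits of the offset select one
       of SR4-SR7.  */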

    return spc;
}
#endif

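/* Form the global virtual address: *PGVA gets space | (offset & mask),
   while *POFS returns the unmasked offset value for any base update.  */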
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_tl();
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#define do_load_reg   do_load_64
#define do_store_reg  do_store_64

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, int64_t disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = tcg_temp_new();
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, int64_t disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     int64_t disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, uint64_t dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1780 
1781 /* Emit a conditional branch to a direct target.  If the branch itself
1782    is nullified, we should have already used nullify_over.  */
1783 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1784                        DisasCond *cond)
1785 {
1786     uint64_t dest = iaoq_dest(ctx, disp);
1787     TCGLabel *taken = NULL;
1788     TCGCond c = cond->c;
1789     bool n;
1790 
1791     assert(ctx->null_cond.c == TCG_COND_NEVER);
1792 
1793     /* Handle TRUE and NEVER as direct branches.  */
1794     if (c == TCG_COND_ALWAYS) {
1795         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1796     }
1797     if (c == TCG_COND_NEVER) {
1798         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1799     }
1800 
1801     taken = gen_new_label();
1802     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1803     cond_free(cond);
1804 
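    /*
     * Nullification for conditional branches, matching what the two
     * paths below compute: with the ,n completer, the following insn
     * is nullified when a forward branch is taken or when a backward
     * branch is not taken -- so the delay slot of a backward loop
     * branch still executes on the taken (looping) path.
     */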
1805     /* Not taken: Condition not satisfied; nullify on backward branches. */
1806     n = is_n && disp < 0;
1807     if (n && use_nullify_skip(ctx)) {
1808         nullify_set(ctx, 0);
1809         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1810     } else {
1811         if (!n && ctx->null_lab) {
1812             gen_set_label(ctx->null_lab);
1813             ctx->null_lab = NULL;
1814         }
1815         nullify_set(ctx, n);
1816         if (ctx->iaoq_n == -1) {
1817             /* The temporary iaoq_n_var died at the branch above.
1818                Regenerate it here instead of saving it.  */
1819             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1820         }
1821         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1822     }
1823 
1824     gen_set_label(taken);
1825 
1826     /* Taken: Condition satisfied; nullify on forward branches.  */
1827     n = is_n && disp >= 0;
1828     if (n && use_nullify_skip(ctx)) {
1829         nullify_set(ctx, 0);
1830         gen_goto_tb(ctx, 1, dest, dest + 4);
1831     } else {
1832         nullify_set(ctx, n);
1833         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1834     }
1835 
1836     /* Not taken: the branch itself was nullified.  */
1837     if (ctx->null_lab) {
1838         gen_set_label(ctx->null_lab);
1839         ctx->null_lab = NULL;
1840         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1841     } else {
1842         ctx->base.is_jmp = DISAS_NORETURN;
1843     }
1844     return true;
1845 }
1846 
1847 /* Emit an unconditional branch to an indirect target.  This handles
1848    nullification of the branch itself.  */
1849 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1850                        unsigned link, bool is_n)
1851 {
1852     TCGv_reg a0, a1, next, tmp;
1853     TCGCond c;
1854 
1855     assert(ctx->null_lab == NULL);
1856 
1857     if (ctx->null_cond.c == TCG_COND_NEVER) {
1858         if (link != 0) {
1859             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1860         }
1861         next = tcg_temp_new();
1862         tcg_gen_mov_reg(next, dest);
1863         if (is_n) {
1864             if (use_nullify_skip(ctx)) {
1865                 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1866                 tcg_gen_addi_reg(next, next, 4);
1867                 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1868                 nullify_set(ctx, 0);
1869                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1870                 return true;
1871             }
1872             ctx->null_cond.c = TCG_COND_ALWAYS;
1873         }
1874         ctx->iaoq_n = -1;
1875         ctx->iaoq_n_var = next;
1876     } else if (is_n && use_nullify_skip(ctx)) {
1877         /* The (conditional) branch, B, nullifies the next insn, N,
1878            and we're allowed to skip execution of N (no single-step or
1879            tracepoint in effect).  Since the goto_ptr that we must use
1880            for the indirect branch consumes no special resources, we
1881            can (conditionally) skip B and continue execution.  */
1882         /* The use_nullify_skip test implies we have a known control path.  */
1883         tcg_debug_assert(ctx->iaoq_b != -1);
1884         tcg_debug_assert(ctx->iaoq_n != -1);
1885 
1886         /* We do have to handle the non-local temporary, DEST, before
1887        branching.  Since IAOQ_F is not really live at this point, we
1888            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1889         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1890         next = tcg_temp_new();
1891         tcg_gen_addi_reg(next, dest, 4);
1892         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1893 
1894         nullify_over(ctx);
1895         if (link != 0) {
1896             copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1897         }
1898         tcg_gen_lookup_and_goto_ptr();
1899         return nullify_end(ctx);
1900     } else {
1901         c = ctx->null_cond.c;
1902         a0 = ctx->null_cond.a0;
1903         a1 = ctx->null_cond.a1;
1904 
1905         tmp = tcg_temp_new();
1906         next = tcg_temp_new();
1907 
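        /*
         * Select IAOQ_Next without branching: when null_cond holds the
         * branch itself is nullified, so fall through to the sequential
         * successor in TMP; otherwise use the branch target in DEST.
         */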
1908         copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1909         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1910         ctx->iaoq_n = -1;
1911         ctx->iaoq_n_var = next;
1912 
1913         if (link != 0) {
1914             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1915         }
1916 
1917         if (is_n) {
1918             /* The branch nullifies the next insn, which means the state of N
1919                after the branch is the inverse of the state of N that applied
1920                to the branch.  */
1921             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1922             cond_free(&ctx->null_cond);
1923             ctx->null_cond = cond_make_n();
1924             ctx->psw_n_nonzero = true;
1925         } else {
1926             cond_free(&ctx->null_cond);
1927         }
1928     }
1929     return true;
1930 }
1931 
1932 /* Implement
1933  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1934  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1935  *    else
1936  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1937  * which keeps the privilege level from being increased.
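 *
 * Worked example (hypothetical addresses): at privilege 1, a target
 * offset of 0x1000 (PL 0 in the low two bits) is rewritten to 0x1001,
 * while 0x1003 (PL 3) is left alone -- the unsigned-max selection
 * below always yields the numerically larger, i.e. less privileged,
 * PL value.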
1938  */
1939 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1940 {
1941     TCGv_reg dest;
1942     switch (ctx->privilege) {
1943     case 0:
1944         /* Privilege 0 is maximum and is allowed to decrease.  */
1945         return offset;
1946     case 3:
1947         /* Privilege 3 is minimum and is never allowed to increase.  */
1948         dest = tcg_temp_new();
1949         tcg_gen_ori_reg(dest, offset, 3);
1950         break;
1951     default:
1952         dest = tcg_temp_new();
1953         tcg_gen_andi_reg(dest, offset, -4);
1954         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1955         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1956         break;
1957     }
1958     return dest;
1959 }
1960 
1961 #ifdef CONFIG_USER_ONLY
1962 /* On Linux, page zero is normally marked execute only + gateway.
1963    Therefore normal read or write is supposed to fail, but specific
1964    offsets have kernel code mapped to raise permissions to implement
1965    system calls.  Handling this via an explicit check here, rather
1966    than in the "be disp(sr2,r0)" instruction that probably sent us
1967    here, is the easiest way to handle the branch delay slot on the
1968    aforementioned BE.  */
1969 static void do_page_zero(DisasContext *ctx)
1970 {
1971     TCGv_reg tmp;
1972 
1973     /* If by some means we get here with PSW[N]=1, that implies that
1974        the B,GATE instruction would be skipped, and we'd fault on the
1975        next insn within the privileged page.  */
1976     switch (ctx->null_cond.c) {
1977     case TCG_COND_NEVER:
1978         break;
1979     case TCG_COND_ALWAYS:
1980         tcg_gen_movi_reg(cpu_psw_n, 0);
1981         goto do_sigill;
1982     default:
1983         /* Since this is always the first (and only) insn within the
1984            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1985         g_assert_not_reached();
1986     }
1987 
1988     /* Check that we didn't arrive here via some means that allowed
1989        non-sequential instruction execution.  Normally the PSW[B] bit
1990        detects this by preventing the B,GATE instruction from executing
1991        under such conditions.  */
1992     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1993         goto do_sigill;
1994     }
1995 
1996     switch (ctx->iaoq_f & -4) {
1997     case 0x00: /* Null pointer call */
1998         gen_excp_1(EXCP_IMP);
1999         ctx->base.is_jmp = DISAS_NORETURN;
2000         break;
2001 
2002     case 0xb0: /* LWS */
2003         gen_excp_1(EXCP_SYSCALL_LWS);
2004         ctx->base.is_jmp = DISAS_NORETURN;
2005         break;
2006 
2007     case 0xe0: /* SET_THREAD_POINTER */
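        /* As assumed by this emulation of the Linux gateway ABI:
           %r26 carries the new thread pointer, latched into CR27,
           and control returns to the address in %r31, forced to
           privilege level 3 by the OR below.  */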
2008         tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
2009         tmp = tcg_temp_new();
2010         tcg_gen_ori_reg(tmp, cpu_gr[31], 3);
2011         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
2012         tcg_gen_addi_reg(tmp, tmp, 4);
2013         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
2014         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2015         break;
2016 
2017     case 0x100: /* SYSCALL */
2018         gen_excp_1(EXCP_SYSCALL);
2019         ctx->base.is_jmp = DISAS_NORETURN;
2020         break;
2021 
2022     default:
2023     do_sigill:
2024         gen_excp_1(EXCP_ILL);
2025         ctx->base.is_jmp = DISAS_NORETURN;
2026         break;
2027     }
2028 }
2029 #endif
2030 
2031 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2032 {
2033     cond_free(&ctx->null_cond);
2034     return true;
2035 }
2036 
2037 static bool trans_break(DisasContext *ctx, arg_break *a)
2038 {
2039     return gen_excp_iir(ctx, EXCP_BREAK);
2040 }
2041 
2042 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2043 {
2044     /* No point in nullifying the memory barrier.  */
2045     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2046 
2047     cond_free(&ctx->null_cond);
2048     return true;
2049 }
2050 
2051 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2052 {
2053     unsigned rt = a->t;
2054     TCGv_reg tmp = dest_gpr(ctx, rt);
2055     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2056     save_gpr(ctx, rt, tmp);
2057 
2058     cond_free(&ctx->null_cond);
2059     return true;
2060 }
2061 
2062 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2063 {
2064     unsigned rt = a->t;
2065     unsigned rs = a->sp;
2066     TCGv_i64 t0 = tcg_temp_new_i64();
2067     TCGv_reg t1 = tcg_temp_new();
2068 
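    /* The 32-bit space id lives in the high half of the 64-bit space
       register; shift it down to form the GPR result.  */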
2069     load_spr(ctx, t0, rs);
2070     tcg_gen_shri_i64(t0, t0, 32);
2071     tcg_gen_trunc_i64_reg(t1, t0);
2072 
2073     save_gpr(ctx, rt, t1);
2074 
2075     cond_free(&ctx->null_cond);
2076     return true;
2077 }
2078 
2079 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2080 {
2081     unsigned rt = a->t;
2082     unsigned ctl = a->r;
2083     TCGv_reg tmp;
2084 
2085     switch (ctl) {
2086     case CR_SAR:
2087         if (a->e == 0) {
2088             /* MFSAR without ,W masks low 5 bits.  */
2089             tmp = dest_gpr(ctx, rt);
2090             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2091             save_gpr(ctx, rt, tmp);
2092             goto done;
2093         }
2094         save_gpr(ctx, rt, cpu_sar);
2095         goto done;
2096     case CR_IT: /* Interval Timer */
2097         /* FIXME: Respect PSW_S bit.  */
2098         nullify_over(ctx);
2099         tmp = dest_gpr(ctx, rt);
2100         if (translator_io_start(&ctx->base)) {
2101             gen_helper_read_interval_timer(tmp);
2102             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2103         } else {
2104             gen_helper_read_interval_timer(tmp);
2105         }
2106         save_gpr(ctx, rt, tmp);
2107         return nullify_end(ctx);
2108     case 26:
2109     case 27:
2110         break;
2111     default:
2112         /* All other control registers are privileged.  */
2113         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2114         break;
2115     }
2116 
2117     tmp = tcg_temp_new();
2118     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2119     save_gpr(ctx, rt, tmp);
2120 
2121  done:
2122     cond_free(&ctx->null_cond);
2123     return true;
2124 }
2125 
2126 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2127 {
2128     unsigned rr = a->r;
2129     unsigned rs = a->sp;
2130     TCGv_i64 t64;
2131 
2132     if (rs >= 5) {
2133         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2134     }
2135     nullify_over(ctx);
2136 
2137     t64 = tcg_temp_new_i64();
2138     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2139     tcg_gen_shli_i64(t64, t64, 32);
2140 
2141     if (rs >= 4) {
2142         tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2143         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2144     } else {
2145         tcg_gen_mov_i64(cpu_sr[rs], t64);
2146     }
2147 
2148     return nullify_end(ctx);
2149 }
2150 
2151 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2152 {
2153     unsigned ctl = a->t;
2154     TCGv_reg reg;
2155     TCGv_reg tmp;
2156 
2157     if (ctl == CR_SAR) {
2158         reg = load_gpr(ctx, a->r);
2159         tmp = tcg_temp_new();
2160         tcg_gen_andi_reg(tmp, reg, ctx->is_pa20 ? 63 : 31);
2161         save_or_nullify(ctx, cpu_sar, tmp);
2162 
2163         cond_free(&ctx->null_cond);
2164         return true;
2165     }
2166 
2167     /* All other control registers are privileged or read-only.  */
2168     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2169 
2170 #ifndef CONFIG_USER_ONLY
2171     nullify_over(ctx);
2172     reg = load_gpr(ctx, a->r);
2173 
2174     switch (ctl) {
2175     case CR_IT:
2176         gen_helper_write_interval_timer(tcg_env, reg);
2177         break;
2178     case CR_EIRR:
2179         gen_helper_write_eirr(tcg_env, reg);
2180         break;
2181     case CR_EIEM:
2182         gen_helper_write_eiem(tcg_env, reg);
2183         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2184         break;
2185 
2186     case CR_IIASQ:
2187     case CR_IIAOQ:
2188         /* FIXME: Respect PSW_Q bit */
2189         /* The write advances the queue and stores to the back element.  */
2190         tmp = tcg_temp_new();
2191         tcg_gen_ld_reg(tmp, tcg_env,
2192                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2193         tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2194         tcg_gen_st_reg(reg, tcg_env,
2195                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2196         break;
2197 
2198     case CR_PID1:
2199     case CR_PID2:
2200     case CR_PID3:
2201     case CR_PID4:
2202         tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2203 #ifndef CONFIG_USER_ONLY
2204         gen_helper_change_prot_id(tcg_env);
2205 #endif
2206         break;
2207 
2208     default:
2209         tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2210         break;
2211     }
2212     return nullify_end(ctx);
2213 #endif
2214 }
2215 
2216 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2217 {
2218     TCGv_reg tmp = tcg_temp_new();
2219 
2220     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2221     tcg_gen_andi_reg(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2222     save_or_nullify(ctx, cpu_sar, tmp);
2223 
2224     cond_free(&ctx->null_cond);
2225     return true;
2226 }
2227 
2228 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2229 {
2230     TCGv_reg dest = dest_gpr(ctx, a->t);
2231 
2232 #ifdef CONFIG_USER_ONLY
2233     /* We don't implement space registers in user mode. */
2234     tcg_gen_movi_reg(dest, 0);
2235 #else
2236     TCGv_i64 t0 = tcg_temp_new_i64();
2237 
2238     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2239     tcg_gen_shri_i64(t0, t0, 32);
2240     tcg_gen_trunc_i64_reg(dest, t0);
2241 #endif
2242     save_gpr(ctx, a->t, dest);
2243 
2244     cond_free(&ctx->null_cond);
2245     return true;
2246 }
2247 
2248 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2249 {
2250     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2251 #ifndef CONFIG_USER_ONLY
2252     TCGv_reg tmp;
2253 
2254     nullify_over(ctx);
2255 
2256     tmp = tcg_temp_new();
2257     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2258     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2259     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2260     save_gpr(ctx, a->t, tmp);
2261 
2262     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2263     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2264     return nullify_end(ctx);
2265 #endif
2266 }
2267 
2268 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2269 {
2270     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2271 #ifndef CONFIG_USER_ONLY
2272     TCGv_reg tmp;
2273 
2274     nullify_over(ctx);
2275 
2276     tmp = tcg_temp_new();
2277     tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2278     tcg_gen_ori_reg(tmp, tmp, a->i);
2279     gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2280     save_gpr(ctx, a->t, tmp);
2281 
2282     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2283     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2284     return nullify_end(ctx);
2285 #endif
2286 }
2287 
2288 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2289 {
2290     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2291 #ifndef CONFIG_USER_ONLY
2292     TCGv_reg tmp, reg;
2293     nullify_over(ctx);
2294 
2295     reg = load_gpr(ctx, a->r);
2296     tmp = tcg_temp_new();
2297     gen_helper_swap_system_mask(tmp, tcg_env, reg);
2298 
2299     /* Exit the TB to recognize new interrupts.  */
2300     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2301     return nullify_end(ctx);
2302 #endif
2303 }
2304 
2305 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2306 {
2307     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2308 #ifndef CONFIG_USER_ONLY
2309     nullify_over(ctx);
2310 
2311     if (rfi_r) {
2312         gen_helper_rfi_r(tcg_env);
2313     } else {
2314         gen_helper_rfi(tcg_env);
2315     }
2316     /* Exit the TB to recognize new interrupts.  */
2317     tcg_gen_exit_tb(NULL, 0);
2318     ctx->base.is_jmp = DISAS_NORETURN;
2319 
2320     return nullify_end(ctx);
2321 #endif
2322 }
2323 
2324 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2325 {
2326     return do_rfi(ctx, false);
2327 }
2328 
2329 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2330 {
2331     return do_rfi(ctx, true);
2332 }
2333 
2334 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2335 {
2336     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2337 #ifndef CONFIG_USER_ONLY
2338     nullify_over(ctx);
2339     gen_helper_halt(tcg_env);
2340     ctx->base.is_jmp = DISAS_NORETURN;
2341     return nullify_end(ctx);
2342 #endif
2343 }
2344 
2345 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2346 {
2347     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2348 #ifndef CONFIG_USER_ONLY
2349     nullify_over(ctx);
2350     gen_helper_reset(tcg_env);
2351     ctx->base.is_jmp = DISAS_NORETURN;
2352     return nullify_end(ctx);
2353 #endif
2354 }
2355 
2356 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2357 {
2358     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2359 #ifndef CONFIG_USER_ONLY
2360     nullify_over(ctx);
2361     gen_helper_getshadowregs(tcg_env);
2362     return nullify_end(ctx);
2363 #endif
2364 }
2365 
2366 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2367 {
2368     if (a->m) {
2369         TCGv_reg dest = dest_gpr(ctx, a->b);
2370         TCGv_reg src1 = load_gpr(ctx, a->b);
2371         TCGv_reg src2 = load_gpr(ctx, a->x);
2372 
2373         /* The only thing we need to do is the base register modification.  */
2374         tcg_gen_add_reg(dest, src1, src2);
2375         save_gpr(ctx, a->b, dest);
2376     }
2377     cond_free(&ctx->null_cond);
2378     return true;
2379 }
2380 
2381 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2382 {
2383     TCGv_reg dest, ofs;
2384     TCGv_i32 level, want;
2385     TCGv_tl addr;
2386 
2387     nullify_over(ctx);
2388 
2389     dest = dest_gpr(ctx, a->t);
2390     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2391 
2392     if (a->imm) {
2393         level = tcg_constant_i32(a->ri);
2394     } else {
2395         level = tcg_temp_new_i32();
2396         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2397         tcg_gen_andi_i32(level, level, 3);
2398     }
2399     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2400 
2401     gen_helper_probe(dest, tcg_env, addr, level, want);
2402 
2403     save_gpr(ctx, a->t, dest);
2404     return nullify_end(ctx);
2405 }
2406 
2407 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2408 {
2409     if (ctx->is_pa20) {
2410         return false;
2411     }
2412     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2413 #ifndef CONFIG_USER_ONLY
2414     TCGv_tl addr;
2415     TCGv_reg ofs, reg;
2416 
2417     nullify_over(ctx);
2418 
2419     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2420     reg = load_gpr(ctx, a->r);
2421     if (a->addr) {
2422         gen_helper_itlba_pa11(tcg_env, addr, reg);
2423     } else {
2424         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2425     }
2426 
2427     /* Exit TB for TLB change if mmu is enabled.  */
2428     if (ctx->tb_flags & PSW_C) {
2429         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2430     }
2431     return nullify_end(ctx);
2432 #endif
2433 }
2434 
2435 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2436 {
2437     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2438 #ifndef CONFIG_USER_ONLY
2439     TCGv_tl addr;
2440     TCGv_reg ofs;
2441 
2442     nullify_over(ctx);
2443 
2444     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2445     if (a->m) {
2446         save_gpr(ctx, a->b, ofs);
2447     }
2448     if (a->local) {
2449         gen_helper_ptlbe(tcg_env);
2450     } else {
2451         gen_helper_ptlb(tcg_env, addr);
2452     }
2453 
2454     /* Exit TB for TLB change if mmu is enabled.  */
2455     if (ctx->tb_flags & PSW_C) {
2456         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2457     }
2458     return nullify_end(ctx);
2459 #endif
2460 }
2461 
2462 /*
2463  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2464  * See
2465  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2466  *     page 13-9 (195/206)
2467  */
2468 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2469 {
2470     if (ctx->is_pa20) {
2471         return false;
2472     }
2473     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2474 #ifndef CONFIG_USER_ONLY
2475     TCGv_tl addr, atl, stl;
2476     TCGv_reg reg;
2477 
2478     nullify_over(ctx);
2479 
2480     /*
2481      * FIXME:
2482      *  if (not (pcxl or pcxl2))
2483      *    return gen_illegal(ctx);
2484      */
2485 
2486     atl = tcg_temp_new_tl();
2487     stl = tcg_temp_new_tl();
2488     addr = tcg_temp_new_tl();
2489 
2490     tcg_gen_ld32u_i64(stl, tcg_env,
2491                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2492                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2493     tcg_gen_ld32u_i64(atl, tcg_env,
2494                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2495                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2496     tcg_gen_shli_i64(stl, stl, 32);
2497     tcg_gen_or_tl(addr, atl, stl);
2498 
2499     reg = load_gpr(ctx, a->r);
2500     if (a->addr) {
2501         gen_helper_itlba_pa11(tcg_env, addr, reg);
2502     } else {
2503         gen_helper_itlbp_pa11(tcg_env, addr, reg);
2504     }
2505 
2506     /* Exit TB for TLB change if mmu is enabled.  */
2507     if (ctx->tb_flags & PSW_C) {
2508         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2509     }
2510     return nullify_end(ctx);
2511 #endif
2512 }
2513 
2514 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2515 {
2516     if (!ctx->is_pa20) {
2517         return false;
2518     }
2519     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2520 #ifndef CONFIG_USER_ONLY
2521     nullify_over(ctx);
2522     {
2523         TCGv_i64 src1 = load_gpr(ctx, a->r1);
2524         TCGv_i64 src2 = load_gpr(ctx, a->r2);
2525 
2526         if (a->data) {
2527             gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2528         } else {
2529             gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2530         }
2531     }
2532     /* Exit TB for TLB change if mmu is enabled.  */
2533     if (ctx->tb_flags & PSW_C) {
2534         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2535     }
2536     return nullify_end(ctx);
2537 #endif
2538 }
2539 
2540 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2541 {
2542     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2543 #ifndef CONFIG_USER_ONLY
2544     TCGv_tl vaddr;
2545     TCGv_reg ofs, paddr;
2546 
2547     nullify_over(ctx);
2548 
2549     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2550 
2551     paddr = tcg_temp_new();
2552     gen_helper_lpa(paddr, tcg_env, vaddr);
2553 
2554     /* Note that the physical address result overrides base modification.  */
2555     if (a->m) {
2556         save_gpr(ctx, a->b, ofs);
2557     }
2558     save_gpr(ctx, a->t, paddr);
2559 
2560     return nullify_end(ctx);
2561 #endif
2562 }
2563 
2564 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2565 {
2566     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2567 
2568     /* The Coherence Index is an implementation-defined function of the
2569        physical address.  Two addresses with the same CI have a coherent
2570        view of the cache.  Our implementation returns 0 for all addresses,
2571        since the entire address space is coherent.  */
2572     save_gpr(ctx, a->t, tcg_constant_reg(0));
2573 
2574     cond_free(&ctx->null_cond);
2575     return true;
2576 }
2577 
2578 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2579 {
2580     return do_add_reg(ctx, a, false, false, false, false);
2581 }
2582 
2583 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2584 {
2585     return do_add_reg(ctx, a, true, false, false, false);
2586 }
2587 
2588 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2589 {
2590     return do_add_reg(ctx, a, false, true, false, false);
2591 }
2592 
2593 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2594 {
2595     return do_add_reg(ctx, a, false, false, false, true);
2596 }
2597 
2598 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2599 {
2600     return do_add_reg(ctx, a, false, true, false, true);
2601 }
2602 
2603 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2604 {
2605     return do_sub_reg(ctx, a, false, false, false);
2606 }
2607 
2608 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2609 {
2610     return do_sub_reg(ctx, a, true, false, false);
2611 }
2612 
2613 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2614 {
2615     return do_sub_reg(ctx, a, false, false, true);
2616 }
2617 
2618 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2619 {
2620     return do_sub_reg(ctx, a, true, false, true);
2621 }
2622 
2623 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2624 {
2625     return do_sub_reg(ctx, a, false, true, false);
2626 }
2627 
2628 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2629 {
2630     return do_sub_reg(ctx, a, true, true, false);
2631 }
2632 
2633 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2634 {
2635     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2636 }
2637 
2638 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2639 {
2640     return do_log_reg(ctx, a, tcg_gen_and_reg);
2641 }
2642 
2643 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2644 {
2645     if (a->cf == 0) {
2646         unsigned r2 = a->r2;
2647         unsigned r1 = a->r1;
2648         unsigned rt = a->t;
2649 
2650         if (rt == 0) { /* NOP */
2651             cond_free(&ctx->null_cond);
2652             return true;
2653         }
2654         if (r2 == 0) { /* COPY */
2655             if (r1 == 0) {
2656                 TCGv_reg dest = dest_gpr(ctx, rt);
2657                 tcg_gen_movi_reg(dest, 0);
2658                 save_gpr(ctx, rt, dest);
2659             } else {
2660                 save_gpr(ctx, rt, cpu_gr[r1]);
2661             }
2662             cond_free(&ctx->null_cond);
2663             return true;
2664         }
2665 #ifndef CONFIG_USER_ONLY
2666         /* These are QEMU extensions and are nops in the real architecture:
2667          *
2668          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2669          * or %r31,%r31,%r31 -- death loop; offline cpu
2670          *                      currently implemented as idle.
2671          */
2672         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2673             /* No need to check for supervisor, as userland can only pause
2674                until the next timer interrupt.  */
2675             nullify_over(ctx);
2676 
2677             /* Advance the instruction queue.  */
2678             copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2679             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2680             nullify_set(ctx, 0);
2681 
2682             /* Tell the qemu main loop to halt until this cpu has work.  */
2683             tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2684                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2685             gen_excp_1(EXCP_HALTED);
2686             ctx->base.is_jmp = DISAS_NORETURN;
2687 
2688             return nullify_end(ctx);
2689         }
2690 #endif
2691     }
2692     return do_log_reg(ctx, a, tcg_gen_or_reg);
2693 }
2694 
2695 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2696 {
2697     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2698 }
2699 
2700 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2701 {
2702     TCGv_reg tcg_r1, tcg_r2;
2703 
2704     if (a->cf) {
2705         nullify_over(ctx);
2706     }
2707     tcg_r1 = load_gpr(ctx, a->r1);
2708     tcg_r2 = load_gpr(ctx, a->r2);
2709     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2710     return nullify_end(ctx);
2711 }
2712 
2713 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2714 {
2715     TCGv_reg tcg_r1, tcg_r2;
2716 
2717     if (a->cf) {
2718         nullify_over(ctx);
2719     }
2720     tcg_r1 = load_gpr(ctx, a->r1);
2721     tcg_r2 = load_gpr(ctx, a->r2);
2722     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_reg);
2723     return nullify_end(ctx);
2724 }
2725 
2726 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2727 {
2728     TCGv_reg tcg_r1, tcg_r2, tmp;
2729 
2730     if (a->cf) {
2731         nullify_over(ctx);
2732     }
2733     tcg_r1 = load_gpr(ctx, a->r1);
2734     tcg_r2 = load_gpr(ctx, a->r2);
2735     tmp = tcg_temp_new();
2736     tcg_gen_not_reg(tmp, tcg_r2);
2737     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_reg);
2738     return nullify_end(ctx);
2739 }
2740 
2741 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2742 {
2743     return do_uaddcm(ctx, a, false);
2744 }
2745 
2746 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2747 {
2748     return do_uaddcm(ctx, a, true);
2749 }
2750 
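/*
 * Decimal correction.  A sketch of the classic usage, not something
 * this function enforces: BCD addends are pre-biased by 6 per digit,
 * a binary ADD records the per-digit carry-outs in PSW[CB], and DCOR
 * then subtracts 6 from every digit that did not carry.  E.g. 3 + 4:
 * bias to 9, add 4 -> 0xd, no nibble carry, correct to 7; 9 + 1: bias
 * to 0xf, add 1 -> 0x10 with a carry, left alone.  Below, the shift
 * by 3 aligns each digit's carry-out with bit 0 of that digit, and
 * the multiply by 6 turns the 0x1111... mask into a 6 in each
 * selected digit.
 */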
2751 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2752 {
2753     TCGv_reg tmp;
2754 
2755     nullify_over(ctx);
2756 
2757     tmp = tcg_temp_new();
2758     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2759     if (!is_i) {
2760         tcg_gen_not_reg(tmp, tmp);
2761     }
2762     tcg_gen_andi_reg(tmp, tmp, (uint64_t)0x1111111111111111ull);
2763     tcg_gen_muli_reg(tmp, tmp, 6);
2764     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2765             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2766     return nullify_end(ctx);
2767 }
2768 
2769 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2770 {
2771     return do_dcor(ctx, a, false);
2772 }
2773 
2774 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2775 {
2776     return do_dcor(ctx, a, true);
2777 }
2778 
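/*
 * Division step: one iteration of a non-restoring binary divide.
 * Roughly (the PSW bookkeeping below follows the manual): shift the
 * partial remainder/quotient left one bit, bringing in the previous
 * quotient bit from PSW[CB], then add or subtract the divisor R2
 * according to PSW[V].
 */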
2779 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2780 {
2781     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2782     TCGv_reg cout;
2783 
2784     nullify_over(ctx);
2785 
2786     in1 = load_gpr(ctx, a->r1);
2787     in2 = load_gpr(ctx, a->r2);
2788 
2789     add1 = tcg_temp_new();
2790     add2 = tcg_temp_new();
2791     addc = tcg_temp_new();
2792     dest = tcg_temp_new();
2793     zero = tcg_constant_reg(0);
2794 
2795     /* Form R1 << 1 | PSW[CB]{8}.  */
2796     tcg_gen_add_reg(add1, in1, in1);
2797     tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
2798 
2799     /*
2800      * Add or subtract R2, depending on PSW[V].  Proper computation of
2801      * carry requires that we subtract via + ~R2 + 1, as described in
2802      * the manual.  By extracting and masking V, we can produce the
2803      * proper inputs to the addition without movcond.
2804      */
2805     tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
2806     tcg_gen_xor_reg(add2, in2, addc);
2807     tcg_gen_andi_reg(addc, addc, 1);
2808 
2809     tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2810     tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2811 
2812     /* Write back the result register.  */
2813     save_gpr(ctx, a->t, dest);
2814 
2815     /* Write back PSW[CB].  */
2816     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2817     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2818 
2819     /* Write back PSW[V] for the division step.  */
2820     cout = get_psw_carry(ctx, false);
2821     tcg_gen_neg_reg(cpu_psw_v, cout);
2822     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2823 
2824     /* Install the new nullification.  */
2825     if (a->cf) {
2826         TCGv_reg sv = NULL;
2827         if (cond_need_sv(a->cf >> 1)) {
2828             /* ??? The lshift is supposed to contribute to overflow.  */
2829             sv = do_add_sv(ctx, dest, add1, add2);
2830         }
2831         ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2832     }
2833 
2834     return nullify_end(ctx);
2835 }
2836 
2837 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2838 {
2839     return do_add_imm(ctx, a, false, false);
2840 }
2841 
2842 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2843 {
2844     return do_add_imm(ctx, a, true, false);
2845 }
2846 
2847 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2848 {
2849     return do_add_imm(ctx, a, false, true);
2850 }
2851 
2852 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2853 {
2854     return do_add_imm(ctx, a, true, true);
2855 }
2856 
2857 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2858 {
2859     return do_sub_imm(ctx, a, false);
2860 }
2861 
2862 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2863 {
2864     return do_sub_imm(ctx, a, true);
2865 }
2866 
2867 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2868 {
2869     TCGv_reg tcg_im, tcg_r2;
2870 
2871     if (a->cf) {
2872         nullify_over(ctx);
2873     }
2874 
2875     tcg_im = tcg_constant_reg(a->i);
2876     tcg_r2 = load_gpr(ctx, a->r);
2877     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2878 
2879     return nullify_end(ctx);
2880 }
2881 
2882 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2883 {
2884     if (!ctx->is_pa20 && a->size > MO_32) {
2885         return gen_illegal(ctx);
2886     }
2887     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2888                    a->disp, a->sp, a->m, a->size | MO_TE);
2889 }
2890 
2891 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2892 {
2893     assert(a->x == 0 && a->scale == 0);
2894     if (!ctx->is_pa20 && a->size > MO_32) {
2895         return gen_illegal(ctx);
2896     }
2897     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2898 }
2899 
2900 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2901 {
2902     MemOp mop = MO_TE | MO_ALIGN | a->size;
2903     TCGv_reg zero, dest, ofs;
2904     TCGv_tl addr;
2905 
2906     if (!ctx->is_pa20 && a->size > MO_32) {
2907         return gen_illegal(ctx);
2908     }
2909 
2910     nullify_over(ctx);
2911 
2912     if (a->m) {
2913         /* Base register modification.  Make sure if RT == RB,
2914            we see the result of the load.  */
2915         dest = tcg_temp_new();
2916     } else {
2917         dest = dest_gpr(ctx, a->t);
2918     }
2919 
2920     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2921              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2922 
2923     /*
2924      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2925      * However actual hardware succeeds with aligned mod 4.
2926      * Detect this case and log a GUEST_ERROR.
2927      *
2928      * TODO: HPPA64 relaxes the over-alignment requirement
2929      * with the ,co completer.
2930      */
2931     gen_helper_ldc_check(addr);
2932 
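    /* The load-and-clear is a single atomic operation: exchange zero
       with the memory word and return the old value.  */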
2933     zero = tcg_constant_reg(0);
2934     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2935 
2936     if (a->m) {
2937         save_gpr(ctx, a->b, ofs);
2938     }
2939     save_gpr(ctx, a->t, dest);
2940 
2941     return nullify_end(ctx);
2942 }
2943 
2944 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2945 {
2946     TCGv_reg ofs, val;
2947     TCGv_tl addr;
2948 
2949     nullify_over(ctx);
2950 
2951     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2952              ctx->mmu_idx == MMU_PHYS_IDX);
2953     val = load_gpr(ctx, a->r);
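    /* a->a selects the "ending case" helpers (stby,e) over the
       "beginning case" (stby,b); the _parallel variants keep the
       partial-word store atomic when other vCPUs run concurrently.  */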
2954     if (a->a) {
2955         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2956             gen_helper_stby_e_parallel(tcg_env, addr, val);
2957         } else {
2958             gen_helper_stby_e(tcg_env, addr, val);
2959         }
2960     } else {
2961         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2962             gen_helper_stby_b_parallel(tcg_env, addr, val);
2963         } else {
2964             gen_helper_stby_b(tcg_env, addr, val);
2965         }
2966     }
2967     if (a->m) {
2968         tcg_gen_andi_reg(ofs, ofs, ~3);
2969         save_gpr(ctx, a->b, ofs);
2970     }
2971 
2972     return nullify_end(ctx);
2973 }
2974 
2975 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
2976 {
2977     TCGv_reg ofs, val;
2978     TCGv_tl addr;
2979 
2980     if (!ctx->is_pa20) {
2981         return false;
2982     }
2983     nullify_over(ctx);
2984 
2985     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2986              ctx->mmu_idx == MMU_PHYS_IDX);
2987     val = load_gpr(ctx, a->r);
2988     if (a->a) {
2989         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2990             gen_helper_stdby_e_parallel(tcg_env, addr, val);
2991         } else {
2992             gen_helper_stdby_e(tcg_env, addr, val);
2993         }
2994     } else {
2995         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2996             gen_helper_stdby_b_parallel(tcg_env, addr, val);
2997         } else {
2998             gen_helper_stdby_b(tcg_env, addr, val);
2999         }
3000     }
3001     if (a->m) {
3002         tcg_gen_andi_reg(ofs, ofs, ~7);
3003         save_gpr(ctx, a->b, ofs);
3004     }
3005 
3006     return nullify_end(ctx);
3007 }
3008 
3009 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3010 {
3011     int hold_mmu_idx = ctx->mmu_idx;
3012 
3013     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3014     ctx->mmu_idx = MMU_PHYS_IDX;
3015     trans_ld(ctx, a);
3016     ctx->mmu_idx = hold_mmu_idx;
3017     return true;
3018 }
3019 
3020 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3021 {
3022     int hold_mmu_idx = ctx->mmu_idx;
3023 
3024     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3025     ctx->mmu_idx = MMU_PHYS_IDX;
3026     trans_st(ctx, a);
3027     ctx->mmu_idx = hold_mmu_idx;
3028     return true;
3029 }
3030 
3031 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3032 {
3033     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3034 
3035     tcg_gen_movi_reg(tcg_rt, a->i);
3036     save_gpr(ctx, a->t, tcg_rt);
3037     cond_free(&ctx->null_cond);
3038     return true;
3039 }
3040 
3041 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3042 {
3043     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3044     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3045 
3046     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3047     save_gpr(ctx, 1, tcg_r1);
3048     cond_free(&ctx->null_cond);
3049     return true;
3050 }
3051 
3052 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3053 {
3054     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3055 
3056     /* Special case rb == 0, for the LDI pseudo-op.
3057        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
3058     if (a->b == 0) {
3059         tcg_gen_movi_reg(tcg_rt, a->i);
3060     } else {
3061         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3062     }
3063     save_gpr(ctx, a->t, tcg_rt);
3064     cond_free(&ctx->null_cond);
3065     return true;
3066 }
3067 
3068 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3069                     unsigned c, unsigned f, bool d, unsigned n, int disp)
3070 {
3071     TCGv_reg dest, in2, sv;
3072     DisasCond cond;
3073 
3074     in2 = load_gpr(ctx, r);
3075     dest = tcg_temp_new();
3076 
3077     tcg_gen_sub_reg(dest, in1, in2);
3078 
3079     sv = NULL;
3080     if (cond_need_sv(c)) {
3081         sv = do_sub_sv(ctx, dest, in1, in2);
3082     }
3083 
3084     cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3085     return do_cbranch(ctx, disp, n, &cond);
3086 }
3087 
3088 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3089 {
3090     if (!ctx->is_pa20 && a->d) {
3091         return false;
3092     }
3093     nullify_over(ctx);
3094     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3095                    a->c, a->f, a->d, a->n, a->disp);
3096 }
3097 
3098 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3099 {
3100     if (!ctx->is_pa20 && a->d) {
3101         return false;
3102     }
3103     nullify_over(ctx);
3104     return do_cmpb(ctx, a->r, tcg_constant_reg(a->i),
3105                    a->c, a->f, a->d, a->n, a->disp);
3106 }
3107 
3108 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3109                     unsigned c, unsigned f, unsigned n, int disp)
3110 {
3111     TCGv_reg dest, in2, sv, cb_cond;
3112     DisasCond cond;
3113     bool d = false;
3114 
3115     /*
3116      * For hppa64, the ADDB conditions change with PSW.W,
3117      * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3118      */
3119     if (ctx->tb_flags & PSW_W) {
3120         d = c >= 5;
3121         if (d) {
3122             c &= 3;
3123         }
3124     }
3125 
3126     in2 = load_gpr(ctx, r);
3127     dest = tcg_temp_new();
3128     sv = NULL;
3129     cb_cond = NULL;
3130 
3131     if (cond_need_cb(c)) {
3132         TCGv_reg cb = tcg_temp_new();
3133         TCGv_reg cb_msb = tcg_temp_new();
3134 
3135         tcg_gen_movi_reg(cb_msb, 0);
3136         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3137         tcg_gen_xor_reg(cb, in1, in2);
3138         tcg_gen_xor_reg(cb, cb, dest);
3139         cb_cond = get_carry(ctx, d, cb, cb_msb);
3140     } else {
3141         tcg_gen_add_reg(dest, in1, in2);
3142     }
3143     if (cond_need_sv(c)) {
3144         sv = do_add_sv(ctx, dest, in1, in2);
3145     }
3146 
3147     cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3148     save_gpr(ctx, r, dest);
3149     return do_cbranch(ctx, disp, n, &cond);
3150 }
3151 
3152 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3153 {
3154     nullify_over(ctx);
3155     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3156 }
3157 
3158 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3159 {
3160     nullify_over(ctx);
3161     return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
3162 }
3163 
3164 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3165 {
3166     TCGv_reg tmp, tcg_r;
3167     DisasCond cond;
3168 
3169     nullify_over(ctx);
3170 
3171     tmp = tcg_temp_new();
3172     tcg_r = load_gpr(ctx, a->r);
3173     if (cond_need_ext(ctx, a->d)) {
3174         /* Force shift into [32,63] */
3175         tcg_gen_ori_reg(tmp, cpu_sar, 32);
3176         tcg_gen_shl_reg(tmp, tcg_r, tmp);
3177     } else {
3178         tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3179     }
3180 
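    /* The selected bit is now in the sign position: LT branches when
       the bit is 1, GE when it is 0, per a->c.  */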
3181     cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3182     return do_cbranch(ctx, a->disp, a->n, &cond);
3183 }
3184 
3185 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3186 {
3187     TCGv_reg tmp, tcg_r;
3188     DisasCond cond;
3189     int p;
3190 
3191     nullify_over(ctx);
3192 
3193     tmp = tcg_temp_new();
3194     tcg_r = load_gpr(ctx, a->r);
3195     p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3196     tcg_gen_shli_reg(tmp, tcg_r, p);
3197 
3198     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3199     return do_cbranch(ctx, a->disp, a->n, &cond);
3200 }
3201 
3202 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3203 {
3204     TCGv_reg dest;
3205     DisasCond cond;
3206 
3207     nullify_over(ctx);
3208 
3209     dest = dest_gpr(ctx, a->r2);
3210     if (a->r1 == 0) {
3211         tcg_gen_movi_reg(dest, 0);
3212     } else {
3213         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3214     }
3215 
3216     /* All MOVB conditions are 32-bit. */
3217     cond = do_sed_cond(ctx, a->c, false, dest);
3218     return do_cbranch(ctx, a->disp, a->n, &cond);
3219 }
3220 
3221 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3222 {
3223     TCGv_reg dest;
3224     DisasCond cond;
3225 
3226     nullify_over(ctx);
3227 
3228     dest = dest_gpr(ctx, a->r);
3229     tcg_gen_movi_reg(dest, a->i);
3230 
3231     /* All MOVBI conditions are 32-bit. */
3232     cond = do_sed_cond(ctx, a->c, false, dest);
3233     return do_cbranch(ctx, a->disp, a->n, &cond);
3234 }
3235 
3236 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3237 {
3238     TCGv_reg dest, src2;
3239 
3240     if (!ctx->is_pa20 && a->d) {
3241         return false;
3242     }
3243     if (a->c) {
3244         nullify_over(ctx);
3245     }
3246 
3247     dest = dest_gpr(ctx, a->t);
3248     src2 = load_gpr(ctx, a->r2);
3249     if (a->r1 == 0) {
3250         if (a->d) {
3251             tcg_gen_shr_reg(dest, src2, cpu_sar);
3252         } else {
3253             TCGv_reg tmp = tcg_temp_new();
3254 
3255             tcg_gen_ext32u_reg(dest, src2);
3256             tcg_gen_andi_reg(tmp, cpu_sar, 31);
3257             tcg_gen_shr_reg(dest, dest, tmp);
3258         }
3259     } else if (a->r1 == a->r2) {
3260         if (a->d) {
3261             tcg_gen_rotr_reg(dest, src2, cpu_sar);
3262         } else {
3263             TCGv_i32 t32 = tcg_temp_new_i32();
3264             TCGv_i32 s32 = tcg_temp_new_i32();
3265 
3266             tcg_gen_trunc_reg_i32(t32, src2);
3267             tcg_gen_trunc_reg_i32(s32, cpu_sar);
3268             tcg_gen_andi_i32(s32, s32, 31);
3269             tcg_gen_rotr_i32(t32, t32, s32);
3270             tcg_gen_extu_i32_reg(dest, t32);
3271         }
3272     } else {
3273         TCGv_reg src1 = load_gpr(ctx, a->r1);
3274 
3275         if (a->d) {
3276             TCGv_reg t = tcg_temp_new();
3277             TCGv_reg n = tcg_temp_new();
3278 
3279             tcg_gen_xori_reg(n, cpu_sar, 63);
3280             tcg_gen_shl_reg(t, src2, n);
3281             tcg_gen_shli_reg(t, t, 1);
3282             tcg_gen_shr_reg(dest, src1, cpu_sar);
3283             tcg_gen_or_reg(dest, dest, t);
3284         } else {
3285             TCGv_i64 t = tcg_temp_new_i64();
3286             TCGv_i64 s = tcg_temp_new_i64();
3287 
3288             tcg_gen_concat_reg_i64(t, src2, src1);
3289             tcg_gen_extu_reg_i64(s, cpu_sar);
3290             tcg_gen_andi_i64(s, s, 31);
3291             tcg_gen_shr_i64(t, t, s);
3292             tcg_gen_trunc_i64_reg(dest, t);
3293         }
3294     }
3295     save_gpr(ctx, a->t, dest);
3296 
3297     /* Install the new nullification.  */
3298     cond_free(&ctx->null_cond);
3299     if (a->c) {
3300         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3301     }
3302     return nullify_end(ctx);
3303 }
3304 
3305 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3306 {
3307     unsigned width, sa;
3308     TCGv_reg dest, t2;
3309 
3310     if (!ctx->is_pa20 && a->d) {
3311         return false;
3312     }
3313     if (a->c) {
3314         nullify_over(ctx);
3315     }
3316 
3317     width = a->d ? 64 : 32;
3318     sa = width - 1 - a->cpos;
3319 
3320     dest = dest_gpr(ctx, a->t);
3321     t2 = load_gpr(ctx, a->r2);
3322     if (a->r1 == 0) {
3323         tcg_gen_extract_reg(dest, t2, sa, width - sa);
3324     } else if (width == TARGET_LONG_BITS) {
3325         tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3326     } else {
3327         assert(!a->d);
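        /* 32-bit pair: rotate when both sources are the same register;
           otherwise concatenate {r1,r2} into 64 bits and shift right.  */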
3328         if (a->r1 == a->r2) {
3329             TCGv_i32 t32 = tcg_temp_new_i32();
3330             tcg_gen_trunc_reg_i32(t32, t2);
3331             tcg_gen_rotri_i32(t32, t32, sa);
3332             tcg_gen_extu_i32_reg(dest, t32);
3333         } else {
3334             TCGv_i64 t64 = tcg_temp_new_i64();
3335             tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3336             tcg_gen_shri_i64(t64, t64, sa);
3337             tcg_gen_trunc_i64_reg(dest, t64);
3338         }
3339     }
3340     save_gpr(ctx, a->t, dest);
3341 
3342     /* Install the new nullification.  */
3343     cond_free(&ctx->null_cond);
3344     if (a->c) {
3345         ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3346     }
3347     return nullify_end(ctx);
3348 }
3349 
3350 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3351 {
3352     unsigned widthm1 = a->d ? 63 : 31;
3353     TCGv_reg dest, src, tmp;
3354 
3355     if (!ctx->is_pa20 && a->d) {
3356         return false;
3357     }
3358     if (a->c) {
3359         nullify_over(ctx);
3360     }
3361 
3362     dest = dest_gpr(ctx, a->t);
3363     src = load_gpr(ctx, a->r);
3364     tmp = tcg_temp_new();
3365 
3366     /* Recall that SAR uses big-endian bit numbering.  */
3367     tcg_gen_andi_reg(tmp, cpu_sar, widthm1);
3368     tcg_gen_xori_reg(tmp, tmp, widthm1);
3369 
3370     if (a->se) {
3371         if (!a->d) {
3372             tcg_gen_ext32s_reg(dest, src);
3373             src = dest;
3374         }
3375         tcg_gen_sar_reg(dest, src, tmp);
3376         tcg_gen_sextract_reg(dest, dest, 0, a->len);
3377     } else {
3378         if (!a->d) {
3379             tcg_gen_ext32u_reg(dest, src);
3380             src = dest;
3381         }
3382         tcg_gen_shr_reg(dest, src, tmp);
3383         tcg_gen_extract_reg(dest, dest, 0, a->len);
3384     }
3385     save_gpr(ctx, a->t, dest);
3386 
3387     /* Install the new nullification.  */
3388     cond_free(&ctx->null_cond);
3389     if (a->c) {
3390         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3391     }
3392     return nullify_end(ctx);
3393 }
3394 
3395 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3396 {
3397     unsigned len, cpos, width;
3398     TCGv_reg dest, src;
3399 
3400     if (!ctx->is_pa20 && a->d) {
3401         return false;
3402     }
3403     if (a->c) {
3404         nullify_over(ctx);
3405     }
3406 
3407     len = a->len;
3408     width = a->d ? 64 : 32;
3409     cpos = width - 1 - a->pos;
3410     if (cpos + len > width) {
3411         len = width - cpos;
3412     }
3413 
3414     dest = dest_gpr(ctx, a->t);
3415     src = load_gpr(ctx, a->r);
3416     if (a->se) {
3417         tcg_gen_sextract_reg(dest, src, cpos, len);
3418     } else {
3419         tcg_gen_extract_reg(dest, src, cpos, len);
3420     }
3421     save_gpr(ctx, a->t, dest);
3422 
3423     /* Install the new nullification.  */
3424     cond_free(&ctx->null_cond);
3425     if (a->c) {
3426         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3427     }
3428     return nullify_end(ctx);
3429 }
3430 
3431 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3432 {
3433     unsigned len, width;
3434     uint64_t mask0, mask1;
3435     TCGv_reg dest;
3436 
3437     if (!ctx->is_pa20 && a->d) {
3438         return false;
3439     }
3440     if (a->c) {
3441         nullify_over(ctx);
3442     }
3443 
3444     len = a->len;
3445     width = a->d ? 64 : 32;
3446     if (a->cpos + len > width) {
3447         len = width - a->cpos;
3448     }
3449 
3450     dest = dest_gpr(ctx, a->t);
3451     mask0 = deposit64(0, a->cpos, len, a->i);
3452     mask1 = deposit64(-1, a->cpos, len, a->i);
3453 
3454     if (a->nz) {
3455         TCGv_reg src = load_gpr(ctx, a->t);
3456         tcg_gen_andi_reg(dest, src, mask1);
3457         tcg_gen_ori_reg(dest, dest, mask0);
3458     } else {
3459         tcg_gen_movi_reg(dest, mask0);
3460     }
3461     save_gpr(ctx, a->t, dest);
3462 
3463     /* Install the new nullification.  */
3464     cond_free(&ctx->null_cond);
3465     if (a->c) {
3466         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3467     }
3468     return nullify_end(ctx);
3469 }
3470 
3471 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3472 {
3473     unsigned rs = a->nz ? a->t : 0;
3474     unsigned len, width;
3475     TCGv_reg dest, val;
3476 
3477     if (!ctx->is_pa20 && a->d) {
3478         return false;
3479     }
3480     if (a->c) {
3481         nullify_over(ctx);
3482     }
3483 
3484     len = a->len;
3485     width = a->d ? 64 : 32;
3486     if (a->cpos + len > width) {
3487         len = width - a->cpos;
3488     }
3489 
3490     dest = dest_gpr(ctx, a->t);
3491     val = load_gpr(ctx, a->r);
3492     if (rs == 0) {
3493         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3494     } else {
3495         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3496     }
3497     save_gpr(ctx, a->t, dest);
3498 
3499     /* Install the new nullification.  */
3500     cond_free(&ctx->null_cond);
3501     if (a->c) {
3502         ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3503     }
3504     return nullify_end(ctx);
3505 }
3506 
3507 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3508                        bool d, bool nz, unsigned len, TCGv_reg val)
3509 {
3510     unsigned rs = nz ? rt : 0;
3511     unsigned widthm1 = d ? 63 : 31;
3512     TCGv_reg mask, tmp, shift, dest;
3513     uint64_t msb = 1ULL << (len - 1);
3514 
3515     dest = dest_gpr(ctx, rt);
3516     shift = tcg_temp_new();
3517     tmp = tcg_temp_new();
3518 
3519     /* Convert big-endian bit numbering in SAR to left-shift.  */
3520     tcg_gen_andi_reg(shift, cpu_sar, widthm1);
3521     tcg_gen_xori_reg(shift, shift, widthm1);
3522 
3523     mask = tcg_temp_new();
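    /* msb + (msb - 1) == (1 << len) - 1: a mask of LEN low bits.  */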
3524     tcg_gen_movi_reg(mask, msb + (msb - 1));
3525     tcg_gen_and_reg(tmp, val, mask);
3526     if (rs) {
3527         tcg_gen_shl_reg(mask, mask, shift);
3528         tcg_gen_shl_reg(tmp, tmp, shift);
3529         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3530         tcg_gen_or_reg(dest, dest, tmp);
3531     } else {
3532         tcg_gen_shl_reg(dest, tmp, shift);
3533     }
3534     save_gpr(ctx, rt, dest);
3535 
3536     /* Install the new nullification.  */
3537     cond_free(&ctx->null_cond);
3538     if (c) {
3539         ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3540     }
3541     return nullify_end(ctx);
3542 }
3543 
3544 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3545 {
3546     if (!ctx->is_pa20 && a->d) {
3547         return false;
3548     }
3549     if (a->c) {
3550         nullify_over(ctx);
3551     }
3552     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3553                       load_gpr(ctx, a->r));
3554 }
3555 
3556 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3557 {
3558     if (!ctx->is_pa20 && a->d) {
3559         return false;
3560     }
3561     if (a->c) {
3562         nullify_over(ctx);
3563     }
3564     return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3565                       tcg_constant_reg(a->i));
3566 }
3567 
3568 static bool trans_be(DisasContext *ctx, arg_be *a)
3569 {
3570     TCGv_reg tmp;
3571 
3572 #ifdef CONFIG_USER_ONLY
3573     /* ??? It seems like there should be a good way of using
3574        "be disp(sr2, r0)", the canonical gateway entry mechanism
3575        to our advantage.  But that appears to be inconvenient to
3576        manage alongside branch delay slots.  Therefore we handle
3577        entry into the gateway page via absolute address.  */
3578     /* Since we don't implement spaces, just branch.  Do notice the special
3579        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3580        goto_tb to the TB containing the syscall.  */
3581     if (a->b == 0) {
3582         return do_dbranch(ctx, a->disp, a->l, a->n);
3583     }
3584 #else
3585     nullify_over(ctx);
3586 #endif
3587 
3588     tmp = tcg_temp_new();
3589     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3590     tmp = do_ibranch_priv(ctx, tmp);
3591 
3592 #ifdef CONFIG_USER_ONLY
3593     return do_ibranch(ctx, tmp, a->l, a->n);
3594 #else
3595     TCGv_i64 new_spc = tcg_temp_new_i64();
3596 
3597     load_spr(ctx, new_spc, a->sp);
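    /* For the linking form (BE,L), the return offset goes to GR 31
       and the return space to SR 0.  */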
3598     if (a->l) {
3599         copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3600         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3601     }
3602     if (a->n && use_nullify_skip(ctx)) {
3603         copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3604         tcg_gen_addi_reg(tmp, tmp, 4);
3605         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3606         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3607         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3608     } else {
3609         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3610         if (ctx->iaoq_b == -1) {
3611             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3612         }
3613         copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3614         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3615         nullify_set(ctx, a->n);
3616     }
3617     tcg_gen_lookup_and_goto_ptr();
3618     ctx->base.is_jmp = DISAS_NORETURN;
3619     return nullify_end(ctx);
3620 #endif
3621 }
3622 
3623 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3624 {
3625     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3626 }
3627 
3628 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3629 {
3630     uint64_t dest = iaoq_dest(ctx, a->disp);
3631 
3632     nullify_over(ctx);
3633 
3634     /* Make sure the caller hasn't done something weird with the queue.
3635      * ??? This is not quite the same as the PSW[B] bit, which would be
3636      * expensive to track.  Real hardware will trap for
3637      *    b  gateway
3638      *    b  gateway+4  (in delay slot of first branch)
3639      * However, checking for a non-sequential instruction queue *will*
3640      * diagnose the security hole
3641      *    b  gateway
3642      *    b  evil
3643      * in which instructions at evil would run with increased privs.
3644      */
3645     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3646         return gen_illegal(ctx);
3647     }
3648 
3649 #ifndef CONFIG_USER_ONLY
3650     if (ctx->tb_flags & PSW_C) {
3651         CPUHPPAState *env = cpu_env(ctx->cs);
3652         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3653         /* If we could not find a TLB entry, then we need to generate an
3654            ITLB miss exception so the kernel will provide it.
3655            The resulting TLB fill operation will invalidate this TB and
3656            we will re-translate, at which point we *will* be able to find
3657            the TLB entry and determine if this is in fact a gateway page.  */
3658         if (type < 0) {
3659             gen_excp(ctx, EXCP_ITLB_MISS);
3660             return true;
3661         }
3662         /* No change for non-gateway pages or for priv decrease.  */
3663         if (type >= 4 && type - 4 < ctx->privilege) {
3664             dest = deposit64(dest, 0, 2, type - 4);
3665         }
3666     } else {
3667         dest &= -4;  /* priv = 0 */
3668     }
3669 #endif
3670 
3671     if (a->l) {
3672         TCGv_reg tmp = dest_gpr(ctx, a->l);
3673         if (ctx->privilege < 3) {
3674             tcg_gen_andi_reg(tmp, tmp, -4);
3675         }
3676         tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3677         save_gpr(ctx, a->l, tmp);
3678     }
3679 
3680     return do_dbranch(ctx, dest, 0, a->n);
3681 }
3682 
3683 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3684 {
3685     if (a->x) {
3686         TCGv_reg tmp = tcg_temp_new();
3687         tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3688         tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3689         /* The computation here never changes privilege level.  */
3690         return do_ibranch(ctx, tmp, a->l, a->n);
3691     } else {
3692         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3693         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3694     }
3695 }
3696 
3697 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3698 {
3699     TCGv_reg dest;
3700 
3701     if (a->x == 0) {
3702         dest = load_gpr(ctx, a->b);
3703     } else {
3704         dest = tcg_temp_new();
3705         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3706         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3707     }
3708     dest = do_ibranch_priv(ctx, dest);
3709     return do_ibranch(ctx, dest, 0, a->n);
3710 }
3711 
3712 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3713 {
3714     TCGv_reg dest;
3715 
3716 #ifdef CONFIG_USER_ONLY
3717     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3718     return do_ibranch(ctx, dest, a->l, a->n);
3719 #else
3720     nullify_over(ctx);
3721     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3722 
3723     copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3724     if (ctx->iaoq_b == -1) {
3725         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3726     }
3727     copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3728     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3729     if (a->l) {
3730         copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3731     }
3732     nullify_set(ctx, a->n);
3733     tcg_gen_lookup_and_goto_ptr();
3734     ctx->base.is_jmp = DISAS_NORETURN;
3735     return nullify_end(ctx);
3736 #endif
3737 }
3738 
3739 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3740 {
3741     /* All branch target stack instructions are implemented as nops. */
3742     return ctx->is_pa20;
3743 }
3744 
3745 /*
3746  * Float class 0
3747  */
3748 
3749 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3750 {
3751     tcg_gen_mov_i32(dst, src);
3752 }
3753 
3754 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3755 {
3756     uint64_t ret;
3757 
3758     if (ctx->is_pa20) {
3759         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3760     } else {
3761         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3762     }
3763 
3764     nullify_over(ctx);
3765     save_frd(0, tcg_constant_i64(ret));
3766     return nullify_end(ctx);
3767 }
3768 
3769 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3770 {
3771     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3772 }
3773 
3774 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3775 {
3776     tcg_gen_mov_i64(dst, src);
3777 }
3778 
3779 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3780 {
3781     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3782 }
3783 
3784 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3785 {
3786     tcg_gen_andi_i32(dst, src, INT32_MAX);
3787 }
3788 
3789 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3790 {
3791     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3792 }
3793 
3794 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3795 {
3796     tcg_gen_andi_i64(dst, src, INT64_MAX);
3797 }
3798 
3799 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3800 {
3801     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3802 }
3803 
3804 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3805 {
3806     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3807 }
3808 
3809 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3810 {
3811     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3812 }
3813 
3814 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3815 {
3816     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3817 }
3818 
3819 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3820 {
3821     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3822 }
3823 
3824 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3825 {
3826     tcg_gen_xori_i32(dst, src, INT32_MIN);
3827 }
3828 
3829 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3830 {
3831     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3832 }
3833 
3834 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3835 {
3836     tcg_gen_xori_i64(dst, src, INT64_MIN);
3837 }
3838 
3839 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3840 {
3841     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3842 }
3843 
3844 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3845 {
3846     tcg_gen_ori_i32(dst, src, INT32_MIN);
3847 }
3848 
3849 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3850 {
3851     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3852 }
3853 
3854 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3855 {
3856     tcg_gen_ori_i64(dst, src, INT64_MIN);
3857 }
3858 
3859 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3860 {
3861     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3862 }
3863 
3864 /*
3865  * Float class 1
3866  */
3867 
3868 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3869 {
3870     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3871 }
3872 
3873 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3874 {
3875     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3876 }
3877 
3878 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3879 {
3880     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3881 }
3882 
3883 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3884 {
3885     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3886 }
3887 
3888 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3889 {
3890     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3891 }
3892 
3893 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3894 {
3895     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3896 }
3897 
3898 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3899 {
3900     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3901 }
3902 
3903 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3904 {
3905     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3906 }
3907 
3908 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3909 {
3910     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3911 }
3912 
3913 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3914 {
3915     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3916 }
3917 
3918 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3919 {
3920     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3921 }
3922 
3923 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3924 {
3925     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3926 }
3927 
3928 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3929 {
3930     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3931 }
3932 
3933 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3934 {
3935     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3936 }
3937 
3938 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3939 {
3940     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3941 }
3942 
3943 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3944 {
3945     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3946 }
3947 
3948 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3949 {
3950     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3951 }
3952 
3953 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3954 {
3955     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3956 }
3957 
3958 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3959 {
3960     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3961 }
3962 
3963 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3964 {
3965     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3966 }
3967 
3968 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3969 {
3970     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3971 }
3972 
3973 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3974 {
3975     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3976 }
3977 
3978 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3979 {
3980     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3981 }
3982 
3983 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3984 {
3985     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3986 }
3987 
3988 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3989 {
3990     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3991 }
3992 
3993 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3994 {
3995     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3996 }
3997 
3998 /*
3999  * Float class 2
4000  */
4001 
4002 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4003 {
4004     TCGv_i32 ta, tb, tc, ty;
4005 
4006     nullify_over(ctx);
4007 
4008     ta = load_frw0_i32(a->r1);
4009     tb = load_frw0_i32(a->r2);
4010     ty = tcg_constant_i32(a->y);
4011     tc = tcg_constant_i32(a->c);
4012 
4013     gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4014 
4015     return nullify_end(ctx);
4016 }
4017 
4018 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4019 {
4020     TCGv_i64 ta, tb;
4021     TCGv_i32 tc, ty;
4022 
4023     nullify_over(ctx);
4024 
4025     ta = load_frd0(a->r1);
4026     tb = load_frd0(a->r2);
4027     ty = tcg_constant_i32(a->y);
4028     tc = tcg_constant_i32(a->c);
4029 
4030     gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4031 
4032     return nullify_end(ctx);
4033 }
4034 
4035 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4036 {
4037     TCGv_reg t;
4038 
4039     nullify_over(ctx);
4040 
4041     t = tcg_temp_new();
4042     tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4043 
4044     if (a->y == 1) {
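    /* fr0_shadow mirrors the FPSR status word.  Judging from the masks
       below, the C bit is bit 26 and the compare queue occupies bits
       21..11, with the newest entry at bit 21.  */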
4045         int mask;
4046         bool inv = false;
4047 
4048         switch (a->c) {
4049         case 0: /* simple */
4050             tcg_gen_andi_reg(t, t, 0x4000000);
4051             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4052             goto done;
4053         case 2: /* rej */
4054             inv = true;
4055             /* fallthru */
4056         case 1: /* acc */
4057             mask = 0x43ff800;
4058             break;
4059         case 6: /* rej8 */
4060             inv = true;
4061             /* fallthru */
4062         case 5: /* acc8 */
4063             mask = 0x43f8000;
4064             break;
4065         case 9: /* acc6 */
4066             mask = 0x43e0000;
4067             break;
4068         case 13: /* acc4 */
4069             mask = 0x4380000;
4070             break;
4071         case 17: /* acc2 */
4072             mask = 0x4200000;
4073             break;
4074         default:
4075             gen_illegal(ctx);
4076             return true;
4077         }
4078         if (inv) {
4079             TCGv_reg c = tcg_constant_reg(mask);
4080             tcg_gen_or_reg(t, t, c);
4081             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4082         } else {
4083             tcg_gen_andi_reg(t, t, mask);
4084             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4085         }
4086     } else {
4087         unsigned cbit = (a->y ^ 1) - 1;
4088 
4089         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
4090         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4091     }
4092 
4093  done:
4094     return nullify_end(ctx);
4095 }
4096 
4097 /*
4098  * Float class 2
4099  */
4100 
4101 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4102 {
4103     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4104 }
4105 
4106 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4107 {
4108     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4109 }
4110 
4111 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4112 {
4113     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4114 }
4115 
4116 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4117 {
4118     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4119 }
4120 
4121 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4122 {
4123     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4124 }
4125 
4126 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4127 {
4128     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4129 }
4130 
4131 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4132 {
4133     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4134 }
4135 
4136 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4137 {
4138     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4139 }
4140 
4141 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4142 {
4143     TCGv_i64 x, y;
4144 
4145     nullify_over(ctx);
4146 
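    /* XMPYU is an unsigned 32x32 -> 64-bit multiply.  Assuming that
       load_frw0_i64 zero-extends the single-word sources, the plain
       64-bit multiply yields the full unsigned product.  */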
4147     x = load_frw0_i64(a->r1);
4148     y = load_frw0_i64(a->r2);
4149     tcg_gen_mul_i64(x, x, y);
4150     save_frd(a->t, x);
4151 
4152     return nullify_end(ctx);
4153 }
4154 
4155 /* Convert the fmpyadd single-precision register encodings to standard.  */
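/* Encodings 0..15 select fr16L..fr31L (register numbers 16..31);
   encodings 16..31 select fr16R..fr31R (register numbers 48..63).  */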
4156 static inline int fmpyadd_s_reg(unsigned r)
4157 {
4158     return (r & 16) * 2 + 16 + (r & 15);
4159 }
4160 
4161 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4162 {
4163     int tm = fmpyadd_s_reg(a->tm);
4164     int ra = fmpyadd_s_reg(a->ra);
4165     int ta = fmpyadd_s_reg(a->ta);
4166     int rm2 = fmpyadd_s_reg(a->rm2);
4167     int rm1 = fmpyadd_s_reg(a->rm1);
4168 
4169     nullify_over(ctx);
4170 
4171     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4172     do_fop_weww(ctx, ta, ta, ra,
4173                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4174 
4175     return nullify_end(ctx);
4176 }
4177 
4178 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4179 {
4180     return do_fmpyadd_s(ctx, a, false);
4181 }
4182 
4183 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4184 {
4185     return do_fmpyadd_s(ctx, a, true);
4186 }
4187 
4188 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4189 {
4190     nullify_over(ctx);
4191 
4192     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4193     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4194                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4195 
4196     return nullify_end(ctx);
4197 }
4198 
4199 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4200 {
4201     return do_fmpyadd_d(ctx, a, false);
4202 }
4203 
4204 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4205 {
4206     return do_fmpyadd_d(ctx, a, true);
4207 }
4208 
4209 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4210 {
4211     TCGv_i32 x, y, z;
4212 
4213     nullify_over(ctx);
4214     x = load_frw0_i32(a->rm1);
4215     y = load_frw0_i32(a->rm2);
4216     z = load_frw0_i32(a->ra3);
4217 
4218     if (a->neg) {
4219         gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4220     } else {
4221         gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4222     }
4223 
4224     save_frw_i32(a->t, x);
4225     return nullify_end(ctx);
4226 }
4227 
4228 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4229 {
4230     TCGv_i64 x, y, z;
4231 
4232     nullify_over(ctx);
4233     x = load_frd0(a->rm1);
4234     y = load_frd0(a->rm2);
4235     z = load_frd0(a->ra3);
4236 
4237     if (a->neg) {
4238         gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4239     } else {
4240         gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4241     }
4242 
4243     save_frd(a->t, x);
4244     return nullify_end(ctx);
4245 }
4246 
4247 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4248 {
4249     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4250 #ifndef CONFIG_USER_ONLY
4251     if (a->i == 0x100) {
4252         /* emulate PDC BTLB, called by SeaBIOS-hppa */
4253         nullify_over(ctx);
4254         gen_helper_diag_btlb(tcg_env);
4255         return nullify_end(ctx);
4256     }
4257 #endif
4258     qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4259     return true;
4260 }
4261 
4262 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4263 {
4264     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4265     int bound;
4266 
4267     ctx->cs = cs;
4268     ctx->tb_flags = ctx->base.tb->flags;
4269     ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4270 
4271 #ifdef CONFIG_USER_ONLY
4272     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4273     ctx->mmu_idx = MMU_USER_IDX;
4274     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4275     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4276     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4277 #else
4278     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4279     ctx->mmu_idx = (ctx->tb_flags & PSW_D
4280                     ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4281                     : MMU_PHYS_IDX);
4282 
4283     /* Recover the IAOQ values from the GVA + PRIV.  */
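    /* cs_base carries the front space id in its high bits and the signed
       displacement iaoq_b - iaoq_f in its low 32 bits; a displacement of
       zero means iaoq_b is unknown and becomes -1 below.  */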
4284     uint64_t cs_base = ctx->base.tb->cs_base;
4285     uint64_t iasq_f = cs_base & ~0xffffffffull;
4286     int32_t diff = cs_base;
4287 
4288     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4289     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4290 #endif
4291     ctx->iaoq_n = -1;
4292     ctx->iaoq_n_var = NULL;
4293 
4294     /* Bound the number of instructions by those left on the page.  */
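    /* -(pc | TARGET_PAGE_MASK) is the number of bytes from pc to the
       end of its page.  */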
4295     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4296     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4297 }
4298 
4299 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4300 {
4301     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4302 
4303     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4304     ctx->null_cond = cond_make_f();
4305     ctx->psw_n_nonzero = false;
4306     if (ctx->tb_flags & PSW_N) {
4307         ctx->null_cond.c = TCG_COND_ALWAYS;
4308         ctx->psw_n_nonzero = true;
4309     }
4310     ctx->null_lab = NULL;
4311 }
4312 
4313 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4314 {
4315     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4316 
4317     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4318 }
4319 
4320 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4321 {
4322     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4323     CPUHPPAState *env = cpu_env(cs);
4324     DisasJumpType ret;
4325 
4326     /* Execute one insn.  */
4327 #ifdef CONFIG_USER_ONLY
4328     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4329         do_page_zero(ctx);
4330         ret = ctx->base.is_jmp;
4331         assert(ret != DISAS_NEXT);
4332     } else
4333 #endif
4334     {
4335         /* Always fetch the insn, even if nullified, so that we check
4336            the page permissions for execute.  */
4337         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4338 
4339         /* Set up the IA queue for the next insn.
4340            This will be overwritten by a branch.  */
4341         if (ctx->iaoq_b == -1) {
4342             ctx->iaoq_n = -1;
4343             ctx->iaoq_n_var = tcg_temp_new();
4344             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4345         } else {
4346             ctx->iaoq_n = ctx->iaoq_b + 4;
4347             ctx->iaoq_n_var = NULL;
4348         }
4349 
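        /* This insn is statically nullified: consume the nullification
           and decode nothing, though the insn was still fetched above.  */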
4350         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4351             ctx->null_cond.c = TCG_COND_NEVER;
4352             ret = DISAS_NEXT;
4353         } else {
4354             ctx->insn = insn;
4355             if (!decode(ctx, insn)) {
4356                 gen_illegal(ctx);
4357             }
4358             ret = ctx->base.is_jmp;
4359             assert(ctx->null_lab == NULL);
4360         }
4361     }
4362 
4363     /* Advance the insn queue.  Note that this check also detects
4364        a privilege change within the instruction queue.  */
4365     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4366         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4367             && use_goto_tb(ctx, ctx->iaoq_b)
4368             && (ctx->null_cond.c == TCG_COND_NEVER
4369                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4370             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4371             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4372             ctx->base.is_jmp = ret = DISAS_NORETURN;
4373         } else {
4374             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4375         }
4376     }
4377     ctx->iaoq_f = ctx->iaoq_b;
4378     ctx->iaoq_b = ctx->iaoq_n;
4379     ctx->base.pc_next += 4;
4380 
4381     switch (ret) {
4382     case DISAS_NORETURN:
4383     case DISAS_IAQ_N_UPDATED:
4384         break;
4385 
4386     case DISAS_NEXT:
4387     case DISAS_IAQ_N_STALE:
4388     case DISAS_IAQ_N_STALE_EXIT:
4389         if (ctx->iaoq_f == -1) {
4390             copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4391             copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4392 #ifndef CONFIG_USER_ONLY
4393             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4394 #endif
4395             nullify_save(ctx);
4396             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4397                                 ? DISAS_EXIT
4398                                 : DISAS_IAQ_N_UPDATED);
4399         } else if (ctx->iaoq_b == -1) {
4400             copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4401         }
4402         break;
4403 
4404     default:
4405         g_assert_not_reached();
4406     }
4407 }
4408 
4409 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4410 {
4411     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4412     DisasJumpType is_jmp = ctx->base.is_jmp;
4413 
4414     switch (is_jmp) {
4415     case DISAS_NORETURN:
4416         break;
4417     case DISAS_TOO_MANY:
4418     case DISAS_IAQ_N_STALE:
4419     case DISAS_IAQ_N_STALE_EXIT:
4420         copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4421         copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4422         nullify_save(ctx);
4423         /* FALLTHRU */
4424     case DISAS_IAQ_N_UPDATED:
4425         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4426             tcg_gen_lookup_and_goto_ptr();
4427             break;
4428         }
4429         /* FALLTHRU */
4430     case DISAS_EXIT:
4431         tcg_gen_exit_tb(NULL, 0);
4432         break;
4433     default:
4434         g_assert_not_reached();
4435     }
4436 }
4437 
4438 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4439                               CPUState *cs, FILE *logfile)
4440 {
4441     target_ulong pc = dcbase->pc_first;
4442 
4443 #ifdef CONFIG_USER_ONLY
4444     switch (pc) {
4445     case 0x00:
4446         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4447         return;
4448     case 0xb0:
4449         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4450         return;
4451     case 0xe0:
4452         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4453         return;
4454     case 0x100:
4455         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4456         return;
4457     }
4458 #endif
4459 
4460     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4461     target_disas(logfile, cs, pc, dcbase->tb->size);
4462 }
4463 
4464 static const TranslatorOps hppa_tr_ops = {
4465     .init_disas_context = hppa_tr_init_disas_context,
4466     .tb_start           = hppa_tr_tb_start,
4467     .insn_start         = hppa_tr_insn_start,
4468     .translate_insn     = hppa_tr_translate_insn,
4469     .tb_stop            = hppa_tr_tb_stop,
4470     .disas_log          = hppa_tr_disas_log,
4471 };
4472 
4473 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4474                            target_ulong pc, void *host_pc)
4475 {
4476     DisasContext ctx;
4477     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4478 }
4479