/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif
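
/*
 * Below, the *_reg aliases dispatch on TARGET_REGISTER_BITS (the width
 * of a general register), while the *_tl aliases above dispatch on
 * TARGET_LONG_BITS (the width of a virtual address).  The two can
 * differ, e.g. 32-bit registers with 64-bit space:offset addresses,
 * so that
 *
 *     tcg_gen_extu_reg_tl(addr, ofs);
 *
 * zero-extends a register-sized offset into an address-sized value,
 * and degenerates to a plain move whenever the two widths match.
 */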

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;
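
/*
 * A DisasCond packages a TCG comparison -- "a0 <c> a1" -- for later
 * use, either as the pending nullification condition (null_cond below)
 * or for a conditional trap.  TCG_COND_NEVER and TCG_COND_ALWAYS denote
 * constant-false and constant-true respectively, with both operands
 * unused; e.g. cond_make_0(TCG_COND_EQ, res) below builds "res == 0".
 */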

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;
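
/*
 * iaoq_f and iaoq_b mirror the architectural Instruction Address Offset
 * Queue: the offset of the currently executing ("front") insn and of
 * the following ("back") insn, which for a taken branch is the delay
 * slot.  iaoq_n is the queue value for the insn after that; throughout
 * this file the value -1 is a sentinel meaning "not known at
 * translation time", in which case iaoq_n_var (or the cpu_iaoq_*
 * globals) carry the runtime value instead.
 */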

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is encoded inverted, so that a decoded value
   of 0 explicitly means sr0, rather than the space being inferred from
   the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}
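
/*
 * The resulting tri-state M is interpreted consistently by the memory
 * helpers below: -1 requests pre-modify (base updated before use),
 * +1 post-modify (base updated after use), and 0 no base register
 * update; see the comment above do_load_32.
 */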

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
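
/*
 * fr[] holds the 64-bit floating registers, which the architecture
 * also addresses as pairs of 32-bit halves: bit 5 of a single-word
 * register number selects the half, and HI_OFS/LO_OFS convert that
 * choice into a host-endian byte offset within the 64-bit element.
 */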

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
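
/*
 * Taken together, the usual shape of a conditional insn that is too
 * complex for a movcond is therefore (compare do_add_reg below):
 *
 *     nullify_over(ctx);           -- branch around the body
 *     ... generate the operation ...
 *     return nullify_end(ctx);     -- bind the label, fix up null_cond
 */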

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */
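
/*
 * The 4-bit condition field CF is decoded uniformly: bits 3..1 select
 * one of the eight base conditions from that table, and bit 0 negates
 * it; so, e.g., CF=2 tests "=" (Z) while CF=3 tests "<>" (!Z).
 */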

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }
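
    /*
     * With res = in1 + in2 (mod 2^N), the carry out of each bit is
     * cb = (in1 & in2) | ((in1 | in2) & ~res): a carry leaves bit i
     * either because both inputs had it set, or because at least one
     * did and the sum bit still came out clear.  The masks applied
     * below (0x8888..., 0x8080..., 0x8000...) then test the carry out
     * of every 4-bit digit, byte, or halfword at once.
     */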

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
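
/*
 * Both follow from two's-complement sign rules: addition overflows
 * iff the operands have the same sign and the result's sign differs,
 * i.e. the MSB of (res ^ in1) & ~(in1 ^ in2) is set; subtraction
 * overflows iff the operands differ in sign and the result's sign
 * differs from in1, i.e. the MSB of (res ^ in1) & (in1 ^ in2).  Only
 * the sign bit of sv is ever examined (see do_cond cases 2 and 6).
 */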

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
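
/*
 * Note the indexing above: the top two bits of the base register
 * select one of sr4-sr7.  Shifting right by TARGET_REGISTER_BITS - 5
 * leaves those two bits at positions 4:3, and masking with 030 (octal)
 * turns them into a byte offset of 0, 8, 16 or 24 from sr[4], i.e. an
 * index into the i64 array sr[4..7].
 */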
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
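
/*
 * On system emulation the value built above is the full global virtual
 * address: the register-sized offset is zero-extended, clipped to 62
 * bits when PSW_W is set, and OR'd with the selected space, which the
 * sr[] values presumably already hold shifted into the high bits.  For
 * user emulation the offset alone serves as the address.
 */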

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
1481     if (modify) {
1482         save_gpr(ctx, rb, ofs);
1483     }
1484 }
1485 
1486 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1487                        unsigned rx, int scale, target_sreg disp,
1488                        unsigned sp, int modify, MemOp mop)
1489 {
1490     TCGv_reg ofs;
1491     TCGv_tl addr;
1492 
1493     /* Caller uses nullify_over/nullify_end.  */
1494     assert(ctx->null_cond.c == TCG_COND_NEVER);
1495 
1496     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1497              ctx->mmu_idx == MMU_PHYS_IDX);
1498     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
1499     if (modify) {
1500         save_gpr(ctx, rb, ofs);
1501     }
1502 }
1503 
1504 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1505                         unsigned rx, int scale, target_sreg disp,
1506                         unsigned sp, int modify, MemOp mop)
1507 {
1508     TCGv_reg ofs;
1509     TCGv_tl addr;
1510 
1511     /* Caller uses nullify_over/nullify_end.  */
1512     assert(ctx->null_cond.c == TCG_COND_NEVER);
1513 
1514     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1515              ctx->mmu_idx == MMU_PHYS_IDX);
1516     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
1517     if (modify) {
1518         save_gpr(ctx, rb, ofs);
1519     }
1520 }
1521 
1522 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1523                         unsigned rx, int scale, target_sreg disp,
1524                         unsigned sp, int modify, MemOp mop)
1525 {
1526     TCGv_reg ofs;
1527     TCGv_tl addr;
1528 
1529     /* Caller uses nullify_over/nullify_end.  */
1530     assert(ctx->null_cond.c == TCG_COND_NEVER);
1531 
1532     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1533              ctx->mmu_idx == MMU_PHYS_IDX);
1534     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
1535     if (modify) {
1536         save_gpr(ctx, rb, ofs);
1537     }
1538 }
1539 
1540 #if TARGET_REGISTER_BITS == 64
1541 #define do_load_reg   do_load_64
1542 #define do_store_reg  do_store_64
1543 #else
1544 #define do_load_reg   do_load_32
1545 #define do_store_reg  do_store_32
1546 #endif
1547 
1548 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1549                     unsigned rx, int scale, target_sreg disp,
1550                     unsigned sp, int modify, MemOp mop)
1551 {
1552     TCGv_reg dest;
1553 
1554     nullify_over(ctx);
1555 
1556     if (modify == 0) {
1557         /* No base register update.  */
1558         dest = dest_gpr(ctx, rt);
1559     } else {
1560         /* Make sure if RT == RB, we see the result of the load.  */
1561         dest = get_temp(ctx);
1562     }
1563     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1564     save_gpr(ctx, rt, dest);
1565 
1566     return nullify_end(ctx);
1567 }
1568 
1569 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1570                       unsigned rx, int scale, target_sreg disp,
1571                       unsigned sp, int modify)
1572 {
1573     TCGv_i32 tmp;
1574 
1575     nullify_over(ctx);
1576 
1577     tmp = tcg_temp_new_i32();
1578     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1579     save_frw_i32(rt, tmp);
1580     tcg_temp_free_i32(tmp);
1581 
1582     if (rt == 0) {
1583         gen_helper_loaded_fr0(cpu_env);
1584     }
1585 
1586     return nullify_end(ctx);
1587 }
1588 
1589 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1590 {
1591     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1592                      a->disp, a->sp, a->m);
1593 }
1594 
1595 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1596                       unsigned rx, int scale, target_sreg disp,
1597                       unsigned sp, int modify)
1598 {
1599     TCGv_i64 tmp;
1600 
1601     nullify_over(ctx);
1602 
1603     tmp = tcg_temp_new_i64();
1604     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1605     save_frd(rt, tmp);
1606     tcg_temp_free_i64(tmp);
1607 
1608     if (rt == 0) {
1609         gen_helper_loaded_fr0(cpu_env);
1610     }
1611 
1612     return nullify_end(ctx);
1613 }
1614 
1615 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1616 {
1617     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1618                      a->disp, a->sp, a->m);
1619 }
1620 
1621 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1622                      target_sreg disp, unsigned sp,
1623                      int modify, MemOp mop)
1624 {
1625     nullify_over(ctx);
1626     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1627     return nullify_end(ctx);
1628 }
1629 
1630 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1631                        unsigned rx, int scale, target_sreg disp,
1632                        unsigned sp, int modify)
1633 {
1634     TCGv_i32 tmp;
1635 
1636     nullify_over(ctx);
1637 
1638     tmp = load_frw_i32(rt);
1639     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1640     tcg_temp_free_i32(tmp);
1641 
1642     return nullify_end(ctx);
1643 }
1644 
1645 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1646 {
1647     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1648                       a->disp, a->sp, a->m);
1649 }
1650 
1651 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1652                        unsigned rx, int scale, target_sreg disp,
1653                        unsigned sp, int modify)
1654 {
1655     TCGv_i64 tmp;
1656 
1657     nullify_over(ctx);
1658 
1659     tmp = load_frd(rt);
1660     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1661     tcg_temp_free_i64(tmp);
1662 
1663     return nullify_end(ctx);
1664 }
1665 
1666 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1667 {
1668     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1669                       a->disp, a->sp, a->m);
1670 }
1671 
1672 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1673                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1674 {
1675     TCGv_i32 tmp;
1676 
1677     nullify_over(ctx);
1678     tmp = load_frw0_i32(ra);
1679 
1680     func(tmp, cpu_env, tmp);
1681 
1682     save_frw_i32(rt, tmp);
1683     tcg_temp_free_i32(tmp);
1684     return nullify_end(ctx);
1685 }
1686 
1687 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1688                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1689 {
1690     TCGv_i32 dst;
1691     TCGv_i64 src;
1692 
1693     nullify_over(ctx);
1694     src = load_frd(ra);
1695     dst = tcg_temp_new_i32();
1696 
1697     func(dst, cpu_env, src);
1698 
1699     tcg_temp_free_i64(src);
1700     save_frw_i32(rt, dst);
1701     tcg_temp_free_i32(dst);
1702     return nullify_end(ctx);
1703 }
1704 
1705 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1706                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1707 {
1708     TCGv_i64 tmp;
1709 
1710     nullify_over(ctx);
1711     tmp = load_frd0(ra);
1712 
1713     func(tmp, cpu_env, tmp);
1714 
1715     save_frd(rt, tmp);
1716     tcg_temp_free_i64(tmp);
1717     return nullify_end(ctx);
1718 }
1719 
1720 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1721                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1722 {
1723     TCGv_i32 src;
1724     TCGv_i64 dst;
1725 
1726     nullify_over(ctx);
1727     src = load_frw0_i32(ra);
1728     dst = tcg_temp_new_i64();
1729 
1730     func(dst, cpu_env, src);
1731 
1732     tcg_temp_free_i32(src);
1733     save_frd(rt, dst);
1734     tcg_temp_free_i64(dst);
1735     return nullify_end(ctx);
1736 }
1737 
1738 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1739                         unsigned ra, unsigned rb,
1740                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1741 {
1742     TCGv_i32 a, b;
1743 
1744     nullify_over(ctx);
1745     a = load_frw0_i32(ra);
1746     b = load_frw0_i32(rb);
1747 
1748     func(a, cpu_env, a, b);
1749 
1750     tcg_temp_free_i32(b);
1751     save_frw_i32(rt, a);
1752     tcg_temp_free_i32(a);
1753     return nullify_end(ctx);
1754 }
1755 
1756 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1757                         unsigned ra, unsigned rb,
1758                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1759 {
1760     TCGv_i64 a, b;
1761 
1762     nullify_over(ctx);
1763     a = load_frd0(ra);
1764     b = load_frd0(rb);
1765 
1766     func(a, cpu_env, a, b);
1767 
1768     tcg_temp_free_i64(b);
1769     save_frd(rt, a);
1770     tcg_temp_free_i64(a);
1771     return nullify_end(ctx);
1772 }
1773 
1774 /* Emit an unconditional branch to a direct target, which may or may not
1775    have already had nullification handled.  */
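/* When no nullification is pending, we simply retarget the queue and
   let the TB chain to the destination; otherwise we must emit separate
   exits for the branch-taken and branch-nullified paths.  */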
1776 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1777                        unsigned link, bool is_n)
1778 {
1779     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1780         if (link != 0) {
1781             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1782         }
1783         ctx->iaoq_n = dest;
1784         if (is_n) {
1785             ctx->null_cond.c = TCG_COND_ALWAYS;
1786         }
1787     } else {
1788         nullify_over(ctx);
1789 
1790         if (link != 0) {
1791             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1792         }
1793 
1794         if (is_n && use_nullify_skip(ctx)) {
1795             nullify_set(ctx, 0);
1796             gen_goto_tb(ctx, 0, dest, dest + 4);
1797         } else {
1798             nullify_set(ctx, is_n);
1799             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1800         }
1801 
1802         nullify_end(ctx);
1803 
1804         nullify_set(ctx, 0);
1805         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1806         ctx->base.is_jmp = DISAS_NORETURN;
1807     }
1808     return true;
1809 }
1810 
1811 /* Emit a conditional branch to a direct target.  If the branch itself
1812    is nullified, we should have already used nullify_over.  */
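/* A sketch of the architectural ,n rule implemented below: a taken
   forward branch nullifies its delay slot, a taken backward branch
   executes it, and the not-taken cases are the mirror image.  E.g. in
   the count-down loop
       loop:  addib,>,n  -1,r4,loop
   the delay slot runs on every taken (backward) iteration and is
   nullified only when the loop finally falls through.  */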
1813 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1814                        DisasCond *cond)
1815 {
1816     target_ureg dest = iaoq_dest(ctx, disp);
1817     TCGLabel *taken = NULL;
1818     TCGCond c = cond->c;
1819     bool n;
1820 
1821     assert(ctx->null_cond.c == TCG_COND_NEVER);
1822 
1823     /* Handle TRUE and NEVER as direct branches.  */
1824     if (c == TCG_COND_ALWAYS) {
1825         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1826     }
1827     if (c == TCG_COND_NEVER) {
1828         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1829     }
1830 
1831     taken = gen_new_label();
1832     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1833     cond_free(cond);
1834 
1835     /* Not taken: Condition not satisfied; nullify on backward branches. */
1836     n = is_n && disp < 0;
1837     if (n && use_nullify_skip(ctx)) {
1838         nullify_set(ctx, 0);
1839         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1840     } else {
1841         if (!n && ctx->null_lab) {
1842             gen_set_label(ctx->null_lab);
1843             ctx->null_lab = NULL;
1844         }
1845         nullify_set(ctx, n);
1846         if (ctx->iaoq_n == -1) {
1847             /* The temporary iaoq_n_var died at the branch above.
1848                Regenerate it here instead of saving it.  */
1849             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1850         }
1851         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1852     }
1853 
1854     gen_set_label(taken);
1855 
1856     /* Taken: Condition satisfied; nullify on forward branches.  */
1857     n = is_n && disp >= 0;
1858     if (n && use_nullify_skip(ctx)) {
1859         nullify_set(ctx, 0);
1860         gen_goto_tb(ctx, 1, dest, dest + 4);
1861     } else {
1862         nullify_set(ctx, n);
1863         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1864     }
1865 
1866     /* Not taken: the branch itself was nullified.  */
1867     if (ctx->null_lab) {
1868         gen_set_label(ctx->null_lab);
1869         ctx->null_lab = NULL;
1870         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1871     } else {
1872         ctx->base.is_jmp = DISAS_NORETURN;
1873     }
1874     return true;
1875 }
1876 
1877 /* Emit an unconditional branch to an indirect target.  This handles
1878    nullification of the branch itself.  */
1879 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1880                        unsigned link, bool is_n)
1881 {
1882     TCGv_reg a0, a1, next, tmp;
1883     TCGCond c;
1884 
1885     assert(ctx->null_lab == NULL);
1886 
1887     if (ctx->null_cond.c == TCG_COND_NEVER) {
1888         if (link != 0) {
1889             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1890         }
1891         next = get_temp(ctx);
1892         tcg_gen_mov_reg(next, dest);
1893         if (is_n) {
1894             if (use_nullify_skip(ctx)) {
1895                 tcg_gen_mov_reg(cpu_iaoq_f, next);
1896                 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1897                 nullify_set(ctx, 0);
1898                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1899                 return true;
1900             }
1901             ctx->null_cond.c = TCG_COND_ALWAYS;
1902         }
1903         ctx->iaoq_n = -1;
1904         ctx->iaoq_n_var = next;
1905     } else if (is_n && use_nullify_skip(ctx)) {
1906         /* The (conditional) branch, B, nullifies the next insn, N,
1907           and we're allowed to skip execution of N (no single-step or
1908            tracepoint in effect).  Since the goto_ptr that we must use
1909            for the indirect branch consumes no special resources, we
1910            can (conditionally) skip B and continue execution.  */
1911         /* The use_nullify_skip test implies we have a known control path.  */
1912         tcg_debug_assert(ctx->iaoq_b != -1);
1913         tcg_debug_assert(ctx->iaoq_n != -1);
1914 
1915         /* We do have to handle the non-local temporary, DEST, before
1916           branching.  Since IAOQ_F is not really live at this point, we
1917            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1918         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1919         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1920 
1921         nullify_over(ctx);
1922         if (link != 0) {
1923             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1924         }
1925         tcg_gen_lookup_and_goto_ptr();
1926         return nullify_end(ctx);
1927     } else {
1928         c = ctx->null_cond.c;
1929         a0 = ctx->null_cond.a0;
1930         a1 = ctx->null_cond.a1;
1931 
1932         tmp = tcg_temp_new();
1933         next = get_temp(ctx);
1934 
1935         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1936         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1937         ctx->iaoq_n = -1;
1938         ctx->iaoq_n_var = next;
1939 
1940         if (link != 0) {
1941             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1942         }
1943 
1944         if (is_n) {
1945             /* The branch nullifies the next insn, which means the state of N
1946                after the branch is the inverse of the state of N that applied
1947                to the branch.  */
1948             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1949             cond_free(&ctx->null_cond);
1950             ctx->null_cond = cond_make_n();
1951             ctx->psw_n_nonzero = true;
1952         } else {
1953             cond_free(&ctx->null_cond);
1954         }
1955     }
1956     return true;
1957 }
1958 
1959 /* Implement
1960  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1961  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1962  *    else
1963  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1964  * which keeps the privilege level from being increased.
1965  */
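/* Worked example for the default case below: at privilege 1, a target
   whose low bits are 00 yields dest > offset, so the movcond keeps
   dest and the branch is clamped back to privilege 1; low bits of 11
   yield offset > dest, and the target is used unchanged, since moving
   to privilege 3 only decreases privilege.  */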
1966 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1967 {
1968     TCGv_reg dest;
1969     switch (ctx->privilege) {
1970     case 0:
1971         /* Privilege 0 is maximum and is allowed to decrease.  */
1972         return offset;
1973     case 3:
1974         /* Privilege 3 is minimum and is never allowed to increase.  */
1975         dest = get_temp(ctx);
1976         tcg_gen_ori_reg(dest, offset, 3);
1977         break;
1978     default:
1979         dest = get_temp(ctx);
1980         tcg_gen_andi_reg(dest, offset, -4);
1981         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1982         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1983         break;
1984     }
1985     return dest;
1986 }
1987 
1988 #ifdef CONFIG_USER_ONLY
1989 /* On Linux, page zero is normally marked execute only + gateway.
1990    Therefore normal read or write is supposed to fail, but specific
1991    offsets have kernel code mapped to raise permissions to implement
1992    system calls.  Handling this via an explicit check here, rather
1993    in than the "be disp(sr2,r0)" instruction that probably sent us
1994     than in the "be disp(sr2,r0)" instruction that probably sent us
1995    aforementioned BE.  */
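/* The offsets recognized below follow the Linux parisc gateway page
   ABI: 0x00 catches null pointer calls, 0xb0 is the light-weight
   syscall (LWS) entry that userland uses for atomic operations, 0xe0
   stores the TLS pointer into CR27, and 0x100 is the normal syscall
   entry.  */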
1996 static void do_page_zero(DisasContext *ctx)
1997 {
1998     /* If by some means we get here with PSW[N]=1, that implies that
1999        the B,GATE instruction would be skipped, and we'd fault on the
2000        next insn within the privileged page.  */
2001     switch (ctx->null_cond.c) {
2002     case TCG_COND_NEVER:
2003         break;
2004     case TCG_COND_ALWAYS:
2005         tcg_gen_movi_reg(cpu_psw_n, 0);
2006         goto do_sigill;
2007     default:
2008         /* Since this is always the first (and only) insn within the
2009            TB, we should know the state of PSW[N] from TB->FLAGS.  */
2010         g_assert_not_reached();
2011     }
2012 
2013     /* Check that we didn't arrive here via some means that allowed
2014        non-sequential instruction execution.  Normally the PSW[B] bit
2015        detects this by preventing the B,GATE instruction from executing
2016        under such conditions.  */
2017     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2018         goto do_sigill;
2019     }
2020 
2021     switch (ctx->iaoq_f & -4) {
2022     case 0x00: /* Null pointer call */
2023         gen_excp_1(EXCP_IMP);
2024         ctx->base.is_jmp = DISAS_NORETURN;
2025         break;
2026 
2027     case 0xb0: /* LWS */
2028         gen_excp_1(EXCP_SYSCALL_LWS);
2029         ctx->base.is_jmp = DISAS_NORETURN;
2030         break;
2031 
2032     case 0xe0: /* SET_THREAD_POINTER */
2033         tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2034         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2035         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2036         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2037         break;
2038 
2039     case 0x100: /* SYSCALL */
2040         gen_excp_1(EXCP_SYSCALL);
2041         ctx->base.is_jmp = DISAS_NORETURN;
2042         break;
2043 
2044     default:
2045     do_sigill:
2046         gen_excp_1(EXCP_ILL);
2047         ctx->base.is_jmp = DISAS_NORETURN;
2048         break;
2049     }
2050 }
2051 #endif
2052 
2053 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2054 {
2055     cond_free(&ctx->null_cond);
2056     return true;
2057 }
2058 
2059 static bool trans_break(DisasContext *ctx, arg_break *a)
2060 {
2061     return gen_excp_iir(ctx, EXCP_BREAK);
2062 }
2063 
2064 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2065 {
2066     /* No point in nullifying the memory barrier.  */
2067     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2068 
2069     cond_free(&ctx->null_cond);
2070     return true;
2071 }
2072 
2073 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2074 {
2075     unsigned rt = a->t;
2076     TCGv_reg tmp = dest_gpr(ctx, rt);
2077     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2078     save_gpr(ctx, rt, tmp);
2079 
2080     cond_free(&ctx->null_cond);
2081     return true;
2082 }
2083 
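/* Space registers are kept as 64-bit values with the space ID in the
   upper half (see trans_mtsp below), so reading one shifts the ID back
   down before truncating to register width.  */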
2084 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2085 {
2086     unsigned rt = a->t;
2087     unsigned rs = a->sp;
2088     TCGv_i64 t0 = tcg_temp_new_i64();
2089     TCGv_reg t1 = tcg_temp_new();
2090 
2091     load_spr(ctx, t0, rs);
2092     tcg_gen_shri_i64(t0, t0, 32);
2093     tcg_gen_trunc_i64_reg(t1, t0);
2094 
2095     save_gpr(ctx, rt, t1);
2096     tcg_temp_free(t1);
2097     tcg_temp_free_i64(t0);
2098 
2099     cond_free(&ctx->null_cond);
2100     return true;
2101 }
2102 
2103 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2104 {
2105     unsigned rt = a->t;
2106     unsigned ctl = a->r;
2107     TCGv_reg tmp;
2108 
2109     switch (ctl) {
2110     case CR_SAR:
2111 #ifdef TARGET_HPPA64
2112         if (a->e == 0) {
2113             /* MFSAR without ,W masks low 5 bits.  */
2114             tmp = dest_gpr(ctx, rt);
2115             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2116             save_gpr(ctx, rt, tmp);
2117             goto done;
2118         }
2119 #endif
2120         save_gpr(ctx, rt, cpu_sar);
2121         goto done;
2122     case CR_IT: /* Interval Timer */
2123         /* FIXME: Respect PSW_S bit.  */
2124         nullify_over(ctx);
2125         tmp = dest_gpr(ctx, rt);
2126         if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2127             gen_io_start();
2128             gen_helper_read_interval_timer(tmp);
2129             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2130         } else {
2131             gen_helper_read_interval_timer(tmp);
2132         }
2133         save_gpr(ctx, rt, tmp);
2134         return nullify_end(ctx);
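    /* CR26 and CR27 are readable without privilege; Linux userland
       relies on this, using CR27 as the thread pointer.  */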
2135     case 26:
2136     case 27:
2137         break;
2138     default:
2139         /* All other control registers are privileged.  */
2140         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2141         break;
2142     }
2143 
2144     tmp = get_temp(ctx);
2145     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2146     save_gpr(ctx, rt, tmp);
2147 
2148  done:
2149     cond_free(&ctx->null_cond);
2150     return true;
2151 }
2152 
2153 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2154 {
2155     unsigned rr = a->r;
2156     unsigned rs = a->sp;
2157     TCGv_i64 t64;
2158 
2159     if (rs >= 5) {
2160         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2161     }
2162     nullify_over(ctx);
2163 
2164     t64 = tcg_temp_new_i64();
2165     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2166     tcg_gen_shli_i64(t64, t64, 32);
2167 
2168     if (rs >= 4) {
2169         tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2170         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2171     } else {
2172         tcg_gen_mov_i64(cpu_sr[rs], t64);
2173     }
2174     tcg_temp_free_i64(t64);
2175 
2176     return nullify_end(ctx);
2177 }
2178 
2179 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2180 {
2181     unsigned ctl = a->t;
2182     TCGv_reg reg;
2183     TCGv_reg tmp;
2184 
2185     if (ctl == CR_SAR) {
2186         reg = load_gpr(ctx, a->r);
2187         tmp = tcg_temp_new();
2188         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2189         save_or_nullify(ctx, cpu_sar, tmp);
2190         tcg_temp_free(tmp);
2191 
2192         cond_free(&ctx->null_cond);
2193         return true;
2194     }
2195 
2196     /* All other control registers are privileged or read-only.  */
2197     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2198 
2199 #ifndef CONFIG_USER_ONLY
2200     nullify_over(ctx);
2201     reg = load_gpr(ctx, a->r);
2202 
2203     switch (ctl) {
2204     case CR_IT:
2205         gen_helper_write_interval_timer(cpu_env, reg);
2206         break;
2207     case CR_EIRR:
2208         gen_helper_write_eirr(cpu_env, reg);
2209         break;
2210     case CR_EIEM:
2211         gen_helper_write_eiem(cpu_env, reg);
2212         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2213         break;
2214 
2215     case CR_IIASQ:
2216     case CR_IIAOQ:
2217         /* FIXME: Respect PSW_Q bit */
2218         /* The write advances the queue and stores to the back element.  */
2219         tmp = get_temp(ctx);
2220         tcg_gen_ld_reg(tmp, cpu_env,
2221                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2222         tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2223         tcg_gen_st_reg(reg, cpu_env,
2224                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2225         break;
2226 
2227     case CR_PID1:
2228     case CR_PID2:
2229     case CR_PID3:
2230     case CR_PID4:
2231         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2232 #ifndef CONFIG_USER_ONLY
2233         gen_helper_change_prot_id(cpu_env);
2234 #endif
2235         break;
2236 
2237     default:
2238         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2239         break;
2240     }
2241     return nullify_end(ctx);
2242 #endif
2243 }
2244 
2245 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2246 {
2247     TCGv_reg tmp = tcg_temp_new();
2248 
2249     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2250     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2251     save_or_nullify(ctx, cpu_sar, tmp);
2252     tcg_temp_free(tmp);
2253 
2254     cond_free(&ctx->null_cond);
2255     return true;
2256 }
2257 
2258 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2259 {
2260     TCGv_reg dest = dest_gpr(ctx, a->t);
2261 
2262 #ifdef CONFIG_USER_ONLY
2263     /* We don't implement space registers in user mode. */
2264     tcg_gen_movi_reg(dest, 0);
2265 #else
2266     TCGv_i64 t0 = tcg_temp_new_i64();
2267 
2268     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2269     tcg_gen_shri_i64(t0, t0, 32);
2270     tcg_gen_trunc_i64_reg(dest, t0);
2271 
2272     tcg_temp_free_i64(t0);
2273 #endif
2274     save_gpr(ctx, a->t, dest);
2275 
2276     cond_free(&ctx->null_cond);
2277     return true;
2278 }
2279 
2280 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2281 {
2282     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2283 #ifndef CONFIG_USER_ONLY
2284     TCGv_reg tmp;
2285 
2286     nullify_over(ctx);
2287 
2288     tmp = get_temp(ctx);
2289     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2290     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2291     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2292     save_gpr(ctx, a->t, tmp);
2293 
2294     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2295     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2296     return nullify_end(ctx);
2297 #endif
2298 }
2299 
2300 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2301 {
2302     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2303 #ifndef CONFIG_USER_ONLY
2304     TCGv_reg tmp;
2305 
2306     nullify_over(ctx);
2307 
2308     tmp = get_temp(ctx);
2309     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2310     tcg_gen_ori_reg(tmp, tmp, a->i);
2311     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2312     save_gpr(ctx, a->t, tmp);
2313 
2314     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2315     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2316     return nullify_end(ctx);
2317 #endif
2318 }
2319 
2320 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2321 {
2322     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2323 #ifndef CONFIG_USER_ONLY
2324     TCGv_reg tmp, reg;
2325     nullify_over(ctx);
2326 
2327     reg = load_gpr(ctx, a->r);
2328     tmp = get_temp(ctx);
2329     gen_helper_swap_system_mask(tmp, cpu_env, reg);
2330 
2331     /* Exit the TB to recognize new interrupts.  */
2332     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2333     return nullify_end(ctx);
2334 #endif
2335 }
2336 
2337 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2338 {
2339     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2340 #ifndef CONFIG_USER_ONLY
2341     nullify_over(ctx);
2342 
2343     if (rfi_r) {
2344         gen_helper_rfi_r(cpu_env);
2345     } else {
2346         gen_helper_rfi(cpu_env);
2347     }
2348     /* Exit the TB to recognize new interrupts.  */
2349     if (ctx->base.singlestep_enabled) {
2350         gen_excp_1(EXCP_DEBUG);
2351     } else {
2352         tcg_gen_exit_tb(NULL, 0);
2353     }
2354     ctx->base.is_jmp = DISAS_NORETURN;
2355 
2356     return nullify_end(ctx);
2357 #endif
2358 }
2359 
2360 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2361 {
2362     return do_rfi(ctx, false);
2363 }
2364 
2365 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2366 {
2367     return do_rfi(ctx, true);
2368 }
2369 
2370 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2371 {
2372     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2373 #ifndef CONFIG_USER_ONLY
2374     nullify_over(ctx);
2375     gen_helper_halt(cpu_env);
2376     ctx->base.is_jmp = DISAS_NORETURN;
2377     return nullify_end(ctx);
2378 #endif
2379 }
2380 
2381 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2382 {
2383     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2384 #ifndef CONFIG_USER_ONLY
2385     nullify_over(ctx);
2386     gen_helper_reset(cpu_env);
2387     ctx->base.is_jmp = DISAS_NORETURN;
2388     return nullify_end(ctx);
2389 #endif
2390 }
2391 
2392 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2393 {
2394     if (a->m) {
2395         TCGv_reg dest = dest_gpr(ctx, a->b);
2396         TCGv_reg src1 = load_gpr(ctx, a->b);
2397         TCGv_reg src2 = load_gpr(ctx, a->x);
2398 
2399         /* The only thing we need to do is the base register modification.  */
2400         tcg_gen_add_reg(dest, src1, src2);
2401         save_gpr(ctx, a->b, dest);
2402     }
2403     cond_free(&ctx->null_cond);
2404     return true;
2405 }
2406 
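/* PROBE tests whether a read or write would be permitted at the given
   privilege level, setting the target register to 1 or 0 instead of
   faulting.  The level is either an immediate or the low two bits of a
   register.  */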
2407 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2408 {
2409     TCGv_reg dest, ofs;
2410     TCGv_i32 level, want;
2411     TCGv_tl addr;
2412 
2413     nullify_over(ctx);
2414 
2415     dest = dest_gpr(ctx, a->t);
2416     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2417 
2418     if (a->imm) {
2419         level = tcg_constant_i32(a->ri);
2420     } else {
2421         level = tcg_temp_new_i32();
2422         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2423         tcg_gen_andi_i32(level, level, 3);
2424     }
2425     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2426 
2427     gen_helper_probe(dest, cpu_env, addr, level, want);
2428 
2429     tcg_temp_free_i32(level);
2430 
2431     save_gpr(ctx, a->t, dest);
2432     return nullify_end(ctx);
2433 }
2434 
2435 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2436 {
2437     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2438 #ifndef CONFIG_USER_ONLY
2439     TCGv_tl addr;
2440     TCGv_reg ofs, reg;
2441 
2442     nullify_over(ctx);
2443 
2444     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2445     reg = load_gpr(ctx, a->r);
2446     if (a->addr) {
2447         gen_helper_itlba(cpu_env, addr, reg);
2448     } else {
2449         gen_helper_itlbp(cpu_env, addr, reg);
2450     }
2451 
2452     /* Exit TB for TLB change if mmu is enabled.  */
2453     if (ctx->tb_flags & PSW_C) {
2454         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2455     }
2456     return nullify_end(ctx);
2457 #endif
2458 }
2459 
2460 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2461 {
2462     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2463 #ifndef CONFIG_USER_ONLY
2464     TCGv_tl addr;
2465     TCGv_reg ofs;
2466 
2467     nullify_over(ctx);
2468 
2469     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2470     if (a->m) {
2471         save_gpr(ctx, a->b, ofs);
2472     }
2473     if (a->local) {
2474         gen_helper_ptlbe(cpu_env);
2475     } else {
2476         gen_helper_ptlb(cpu_env, addr);
2477     }
2478 
2479     /* Exit TB for TLB change if mmu is enabled.  */
2480     if (ctx->tb_flags & PSW_C) {
2481         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2482     }
2483     return nullify_end(ctx);
2484 #endif
2485 }
2486 
2487 /*
2488  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2489  * See
2490  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2491  *     page 13-9 (195/206)
2492  */
2493 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2494 {
2495     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2496 #ifndef CONFIG_USER_ONLY
2497     TCGv_tl addr, atl, stl;
2498     TCGv_reg reg;
2499 
2500     nullify_over(ctx);
2501 
2502     /*
2503      * FIXME:
2504      *  if (not (pcxl or pcxl2))
2505      *    return gen_illegal(ctx);
2506      *
2507      * Note for future: these are 32-bit systems; no hppa64.
2508      */
2509 
2510     atl = tcg_temp_new_tl();
2511     stl = tcg_temp_new_tl();
2512     addr = tcg_temp_new_tl();
2513 
2514     tcg_gen_ld32u_i64(stl, cpu_env,
2515                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2516                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2517     tcg_gen_ld32u_i64(atl, cpu_env,
2518                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2519                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2520     tcg_gen_shli_i64(stl, stl, 32);
2521     tcg_gen_or_tl(addr, atl, stl);
2522     tcg_temp_free_tl(atl);
2523     tcg_temp_free_tl(stl);
2524 
2525     reg = load_gpr(ctx, a->r);
2526     if (a->addr) {
2527         gen_helper_itlba(cpu_env, addr, reg);
2528     } else {
2529         gen_helper_itlbp(cpu_env, addr, reg);
2530     }
2531     tcg_temp_free_tl(addr);
2532 
2533     /* Exit TB for TLB change if mmu is enabled.  */
2534     if (ctx->tb_flags & PSW_C) {
2535         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2536     }
2537     return nullify_end(ctx);
2538 #endif
2539 }
2540 
2541 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2542 {
2543     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2544 #ifndef CONFIG_USER_ONLY
2545     TCGv_tl vaddr;
2546     TCGv_reg ofs, paddr;
2547 
2548     nullify_over(ctx);
2549 
2550     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2551 
2552     paddr = tcg_temp_new();
2553     gen_helper_lpa(paddr, cpu_env, vaddr);
2554 
2555     /* Note that the physical address result overrides the base modification.  */
2556     if (a->m) {
2557         save_gpr(ctx, a->b, ofs);
2558     }
2559     save_gpr(ctx, a->t, paddr);
2560     tcg_temp_free(paddr);
2561 
2562     return nullify_end(ctx);
2563 #endif
2564 }
2565 
2566 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2567 {
2568     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2569 
2570     /* The Coherence Index is an implementation-defined function of the
2571        physical address.  Two addresses with the same CI have a coherent
2572        view of the cache.  Our implementation returns 0 for all addresses,
2573        since the entire address space is coherent.  */
2574     save_gpr(ctx, a->t, tcg_constant_reg(0));
2575 
2576     cond_free(&ctx->null_cond);
2577     return true;
2578 }
2579 
2580 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2581 {
2582     return do_add_reg(ctx, a, false, false, false, false);
2583 }
2584 
2585 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2586 {
2587     return do_add_reg(ctx, a, true, false, false, false);
2588 }
2589 
2590 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2591 {
2592     return do_add_reg(ctx, a, false, true, false, false);
2593 }
2594 
2595 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2596 {
2597     return do_add_reg(ctx, a, false, false, false, true);
2598 }
2599 
2600 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2601 {
2602     return do_add_reg(ctx, a, false, true, false, true);
2603 }
2604 
2605 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2606 {
2607     return do_sub_reg(ctx, a, false, false, false);
2608 }
2609 
2610 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2611 {
2612     return do_sub_reg(ctx, a, true, false, false);
2613 }
2614 
2615 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2616 {
2617     return do_sub_reg(ctx, a, false, false, true);
2618 }
2619 
2620 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2621 {
2622     return do_sub_reg(ctx, a, true, false, true);
2623 }
2624 
2625 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2626 {
2627     return do_sub_reg(ctx, a, false, true, false);
2628 }
2629 
2630 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2631 {
2632     return do_sub_reg(ctx, a, true, true, false);
2633 }
2634 
2635 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2636 {
2637     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2638 }
2639 
2640 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2641 {
2642     return do_log_reg(ctx, a, tcg_gen_and_reg);
2643 }
2644 
2645 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2646 {
2647     if (a->cf == 0) {
2648         unsigned r2 = a->r2;
2649         unsigned r1 = a->r1;
2650         unsigned rt = a->t;
2651 
2652         if (rt == 0) { /* NOP */
2653             cond_free(&ctx->null_cond);
2654             return true;
2655         }
2656         if (r2 == 0) { /* COPY */
2657             if (r1 == 0) {
2658                 TCGv_reg dest = dest_gpr(ctx, rt);
2659                 tcg_gen_movi_reg(dest, 0);
2660                 save_gpr(ctx, rt, dest);
2661             } else {
2662                 save_gpr(ctx, rt, cpu_gr[r1]);
2663             }
2664             cond_free(&ctx->null_cond);
2665             return true;
2666         }
2667 #ifndef CONFIG_USER_ONLY
2668         /* These are QEMU extensions and are nops in the real architecture:
2669          *
2670          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2671          * or %r31,%r31,%r31 -- death loop; offline cpu
2672          *                      currently implemented as idle.
2673          */
2674         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2675             /* No need to check for supervisor, as userland can only pause
2676                until the next timer interrupt.  */
2677             nullify_over(ctx);
2678 
2679             /* Advance the instruction queue.  */
2680             copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2681             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2682             nullify_set(ctx, 0);
2683 
2684             /* Tell the qemu main loop to halt until this cpu has work.  */
2685             tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2686                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2687             gen_excp_1(EXCP_HALTED);
2688             ctx->base.is_jmp = DISAS_NORETURN;
2689 
2690             return nullify_end(ctx);
2691         }
2692 #endif
2693     }
2694     return do_log_reg(ctx, a, tcg_gen_or_reg);
2695 }
2696 
2697 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2698 {
2699     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2700 }
2701 
2702 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2703 {
2704     TCGv_reg tcg_r1, tcg_r2;
2705 
2706     if (a->cf) {
2707         nullify_over(ctx);
2708     }
2709     tcg_r1 = load_gpr(ctx, a->r1);
2710     tcg_r2 = load_gpr(ctx, a->r2);
2711     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2712     return nullify_end(ctx);
2713 }
2714 
2715 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2716 {
2717     TCGv_reg tcg_r1, tcg_r2;
2718 
2719     if (a->cf) {
2720         nullify_over(ctx);
2721     }
2722     tcg_r1 = load_gpr(ctx, a->r1);
2723     tcg_r2 = load_gpr(ctx, a->r2);
2724     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2725     return nullify_end(ctx);
2726 }
2727 
2728 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2729 {
2730     TCGv_reg tcg_r1, tcg_r2, tmp;
2731 
2732     if (a->cf) {
2733         nullify_over(ctx);
2734     }
2735     tcg_r1 = load_gpr(ctx, a->r1);
2736     tcg_r2 = load_gpr(ctx, a->r2);
2737     tmp = get_temp(ctx);
2738     tcg_gen_not_reg(tmp, tcg_r2);
2739     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2740     return nullify_end(ctx);
2741 }
2742 
2743 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2744 {
2745     return do_uaddcm(ctx, a, false);
2746 }
2747 
2748 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2749 {
2750     return do_uaddcm(ctx, a, true);
2751 }
2752 
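/* DCOR and IDCOR, decimal correct.  PSW[CB] holds a carry bit per
   nibble; the code below turns those bits into a correction mask, so
   that (roughly) DCOR subtracts 6 from every BCD digit that failed to
   produce a carry, while the intercept form adds 6 to every digit that
   did.  */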
2753 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2754 {
2755     TCGv_reg tmp;
2756 
2757     nullify_over(ctx);
2758 
2759     tmp = get_temp(ctx);
2760     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2761     if (!is_i) {
2762         tcg_gen_not_reg(tmp, tmp);
2763     }
2764     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2765     tcg_gen_muli_reg(tmp, tmp, 6);
2766     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2767             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2768     return nullify_end(ctx);
2769 }
2770 
2771 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2772 {
2773     return do_dcor(ctx, a, false);
2774 }
2775 
2776 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2777 {
2778     return do_dcor(ctx, a, true);
2779 }
2780 
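/* DS, divide step, performs one iteration of a non-restoring division:
   the running value in R1 is shifted left one bit, pulling in the
   carry from PSW[CB]{8}, and R2 is then added or subtracted according
   to the sign that the previous step recorded in PSW[V].  Issued 32
   times, this develops the quotient one bit per step.  */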
2781 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2782 {
2783     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2784 
2785     nullify_over(ctx);
2786 
2787     in1 = load_gpr(ctx, a->r1);
2788     in2 = load_gpr(ctx, a->r2);
2789 
2790     add1 = tcg_temp_new();
2791     add2 = tcg_temp_new();
2792     addc = tcg_temp_new();
2793     dest = tcg_temp_new();
2794     zero = tcg_constant_reg(0);
2795 
2796     /* Form R1 << 1 | PSW[CB]{8}.  */
2797     tcg_gen_add_reg(add1, in1, in1);
2798     tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2799 
2800     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2801        carry{8} requires that we subtract via + ~R2 + 1, as described in
2802        the manual.  By extracting and masking V, we can produce the
2803        proper inputs to the addition without movcond.  */
2804     tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2805     tcg_gen_xor_reg(add2, in2, addc);
2806     tcg_gen_andi_reg(addc, addc, 1);
2807     /* ??? This is only correct for 32-bit.  */
2808     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2809     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2810 
2811     tcg_temp_free(addc);
2812 
2813     /* Write back the result register.  */
2814     save_gpr(ctx, a->t, dest);
2815 
2816     /* Write back PSW[CB].  */
2817     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2818     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2819 
2820     /* Write back PSW[V] for the division step.  */
2821     tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2822     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2823 
2824     /* Install the new nullification.  */
2825     if (a->cf) {
2826         TCGv_reg sv = NULL;
2827         if (cond_need_sv(a->cf >> 1)) {
2828             /* ??? The lshift is supposed to contribute to overflow.  */
2829             sv = do_add_sv(ctx, dest, add1, add2);
2830         }
2831         ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2832     }
2833 
2834     tcg_temp_free(add1);
2835     tcg_temp_free(add2);
2836     tcg_temp_free(dest);
2837 
2838     return nullify_end(ctx);
2839 }
2840 
2841 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2842 {
2843     return do_add_imm(ctx, a, false, false);
2844 }
2845 
2846 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2847 {
2848     return do_add_imm(ctx, a, true, false);
2849 }
2850 
2851 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2852 {
2853     return do_add_imm(ctx, a, false, true);
2854 }
2855 
2856 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2857 {
2858     return do_add_imm(ctx, a, true, true);
2859 }
2860 
2861 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2862 {
2863     return do_sub_imm(ctx, a, false);
2864 }
2865 
2866 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2867 {
2868     return do_sub_imm(ctx, a, true);
2869 }
2870 
2871 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2872 {
2873     TCGv_reg tcg_im, tcg_r2;
2874 
2875     if (a->cf) {
2876         nullify_over(ctx);
2877     }
2878 
2879     tcg_im = load_const(ctx, a->i);
2880     tcg_r2 = load_gpr(ctx, a->r);
2881     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2882 
2883     return nullify_end(ctx);
2884 }
2885 
2886 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2887 {
2888     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2889                    a->disp, a->sp, a->m, a->size | MO_TE);
2890 }
2891 
2892 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2893 {
2894     assert(a->x == 0 && a->scale == 0);
2895     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2896 }
2897 
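/* LDCW, load and clear word, is the architecture's one atomic
   read-modify-write primitive, the basis of guest spinlocks; we model
   it as an atomic exchange with zero.  */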
2898 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2899 {
2900     MemOp mop = MO_TE | MO_ALIGN | a->size;
2901     TCGv_reg zero, dest, ofs;
2902     TCGv_tl addr;
2903 
2904     nullify_over(ctx);
2905 
2906     if (a->m) {
2907         /* Base register modification.  Make sure that if RT == RB,
2908            we see the result of the load.  */
2909         dest = get_temp(ctx);
2910     } else {
2911         dest = dest_gpr(ctx, a->t);
2912     }
2913 
2914     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2915              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2916 
2917     /*
2918      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2919      * However, actual hardware succeeds when aligned mod 4.
2920      * Detect this case and log a GUEST_ERROR.
2921      *
2922      * TODO: HPPA64 relaxes the over-alignment requirement
2923      * with the ,co completer.
2924      */
2925     gen_helper_ldc_check(addr);
2926 
2927     zero = tcg_constant_reg(0);
2928     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2929 
2930     if (a->m) {
2931         save_gpr(ctx, a->b, ofs);
2932     }
2933     save_gpr(ctx, a->t, dest);
2934 
2935     return nullify_end(ctx);
2936 }
2937 
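/* STBY, store bytes, writes only the leading or trailing bytes of a
   word, selected by the ,b/,e completer and the low address bits, in
   support of unaligned block moves.  The _parallel helper variants are
   the MTTCG-safe versions, used when this TB can race with stores from
   other vcpus.  */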
2938 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2939 {
2940     TCGv_reg ofs, val;
2941     TCGv_tl addr;
2942 
2943     nullify_over(ctx);
2944 
2945     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2946              ctx->mmu_idx == MMU_PHYS_IDX);
2947     val = load_gpr(ctx, a->r);
2948     if (a->a) {
2949         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2950             gen_helper_stby_e_parallel(cpu_env, addr, val);
2951         } else {
2952             gen_helper_stby_e(cpu_env, addr, val);
2953         }
2954     } else {
2955         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2956             gen_helper_stby_b_parallel(cpu_env, addr, val);
2957         } else {
2958             gen_helper_stby_b(cpu_env, addr, val);
2959         }
2960     }
2961     if (a->m) {
2962         tcg_gen_andi_reg(ofs, ofs, ~3);
2963         save_gpr(ctx, a->b, ofs);
2964     }
2965 
2966     return nullify_end(ctx);
2967 }
2968 
2969 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2970 {
2971     int hold_mmu_idx = ctx->mmu_idx;
2972 
2973     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2974     ctx->mmu_idx = MMU_PHYS_IDX;
2975     trans_ld(ctx, a);
2976     ctx->mmu_idx = hold_mmu_idx;
2977     return true;
2978 }
2979 
2980 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2981 {
2982     int hold_mmu_idx = ctx->mmu_idx;
2983 
2984     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2985     ctx->mmu_idx = MMU_PHYS_IDX;
2986     trans_st(ctx, a);
2987     ctx->mmu_idx = hold_mmu_idx;
2988     return true;
2989 }
2990 
2991 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2992 {
2993     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2994 
2995     tcg_gen_movi_reg(tcg_rt, a->i);
2996     save_gpr(ctx, a->t, tcg_rt);
2997     cond_free(&ctx->null_cond);
2998     return true;
2999 }
3000 
3001 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3002 {
3003     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3004     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3005 
3006     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3007     save_gpr(ctx, 1, tcg_r1);
3008     cond_free(&ctx->null_cond);
3009     return true;
3010 }
3011 
3012 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3013 {
3014     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3015 
3016     /* Special case rb == 0, for the LDI pseudo-op.
3017        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
3018     if (a->b == 0) {
3019         tcg_gen_movi_reg(tcg_rt, a->i);
3020     } else {
3021         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3022     }
3023     save_gpr(ctx, a->t, tcg_rt);
3024     cond_free(&ctx->null_cond);
3025     return true;
3026 }
3027 
3028 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3029                     unsigned c, unsigned f, unsigned n, int disp)
3030 {
3031     TCGv_reg dest, in2, sv;
3032     DisasCond cond;
3033 
3034     in2 = load_gpr(ctx, r);
3035     dest = get_temp(ctx);
3036 
3037     tcg_gen_sub_reg(dest, in1, in2);
3038 
3039     sv = NULL;
3040     if (cond_need_sv(c)) {
3041         sv = do_sub_sv(ctx, dest, in1, in2);
3042     }
3043 
3044     cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3045     return do_cbranch(ctx, disp, n, &cond);
3046 }
3047 
3048 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3049 {
3050     nullify_over(ctx);
3051     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3052 }
3053 
3054 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3055 {
3056     nullify_over(ctx);
3057     return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3058 }
3059 
3060 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3061                     unsigned c, unsigned f, unsigned n, int disp)
3062 {
3063     TCGv_reg dest, in2, sv, cb_msb;
3064     DisasCond cond;
3065 
3066     in2 = load_gpr(ctx, r);
3067     dest = tcg_temp_new();
3068     sv = NULL;
3069     cb_msb = NULL;
3070 
3071     if (cond_need_cb(c)) {
3072         cb_msb = get_temp(ctx);
3073         tcg_gen_movi_reg(cb_msb, 0);
3074         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3075     } else {
3076         tcg_gen_add_reg(dest, in1, in2);
3077     }
3078     if (cond_need_sv(c)) {
3079         sv = do_add_sv(ctx, dest, in1, in2);
3080     }
3081 
3082     cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3083     save_gpr(ctx, r, dest);
3084     tcg_temp_free(dest);
3085     return do_cbranch(ctx, disp, n, &cond);
3086 }
3087 
3088 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3089 {
3090     nullify_over(ctx);
3091     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3092 }
3093 
3094 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3095 {
3096     nullify_over(ctx);
3097     return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3098 }
3099 
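/* BB, branch on bit.  SAR numbers bits big-endian, so shifting the
   register left by SAR (or by the fixed position, below) moves the
   selected bit into the sign bit, where a signed less-than or
   greater-or-equal test against zero reads it directly.  */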
3100 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3101 {
3102     TCGv_reg tmp, tcg_r;
3103     DisasCond cond;
3104 
3105     nullify_over(ctx);
3106 
3107     tmp = tcg_temp_new();
3108     tcg_r = load_gpr(ctx, a->r);
3109     tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3110 
3111     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3112     tcg_temp_free(tmp);
3113     return do_cbranch(ctx, a->disp, a->n, &cond);
3114 }
3115 
3116 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3117 {
3118     TCGv_reg tmp, tcg_r;
3119     DisasCond cond;
3120 
3121     nullify_over(ctx);
3122 
3123     tmp = tcg_temp_new();
3124     tcg_r = load_gpr(ctx, a->r);
3125     tcg_gen_shli_reg(tmp, tcg_r, a->p);
3126 
3127     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3128     tcg_temp_free(tmp);
3129     return do_cbranch(ctx, a->disp, a->n, &cond);
3130 }
3131 
3132 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3133 {
3134     TCGv_reg dest;
3135     DisasCond cond;
3136 
3137     nullify_over(ctx);
3138 
3139     dest = dest_gpr(ctx, a->r2);
3140     if (a->r1 == 0) {
3141         tcg_gen_movi_reg(dest, 0);
3142     } else {
3143         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3144     }
3145 
3146     cond = do_sed_cond(a->c, dest);
3147     return do_cbranch(ctx, a->disp, a->n, &cond);
3148 }
3149 
3150 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3151 {
3152     TCGv_reg dest;
3153     DisasCond cond;
3154 
3155     nullify_over(ctx);
3156 
3157     dest = dest_gpr(ctx, a->r);
3158     tcg_gen_movi_reg(dest, a->i);
3159 
3160     cond = do_sed_cond(a->c, dest);
3161     return do_cbranch(ctx, a->disp, a->n, &cond);
3162 }
3163 
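/* SHRPW, shift right pair word: shift the 64-bit quantity R1:R2 right
   and keep the low 32 bits.  The special cases collapse: R1 == 0 is a
   plain logical shift of R2, and R1 == R2 is a 32-bit rotate.  */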
3164 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3165 {
3166     TCGv_reg dest;
3167 
3168     if (a->c) {
3169         nullify_over(ctx);
3170     }
3171 
3172     dest = dest_gpr(ctx, a->t);
3173     if (a->r1 == 0) {
3174         tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3175         tcg_gen_shr_reg(dest, dest, cpu_sar);
3176     } else if (a->r1 == a->r2) {
3177         TCGv_i32 t32 = tcg_temp_new_i32();
3178         tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3179         tcg_gen_rotr_i32(t32, t32, cpu_sar);
3180         tcg_gen_extu_i32_reg(dest, t32);
3181         tcg_temp_free_i32(t32);
3182     } else {
3183         TCGv_i64 t = tcg_temp_new_i64();
3184         TCGv_i64 s = tcg_temp_new_i64();
3185 
3186         tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3187         tcg_gen_extu_reg_i64(s, cpu_sar);
3188         tcg_gen_shr_i64(t, t, s);
3189         tcg_gen_trunc_i64_reg(dest, t);
3190 
3191         tcg_temp_free_i64(t);
3192         tcg_temp_free_i64(s);
3193     }
3194     save_gpr(ctx, a->t, dest);
3195 
3196     /* Install the new nullification.  */
3197     cond_free(&ctx->null_cond);
3198     if (a->c) {
3199         ctx->null_cond = do_sed_cond(a->c, dest);
3200     }
3201     return nullify_end(ctx);
3202 }
3203 
3204 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3205 {
3206     unsigned sa = 31 - a->cpos;
3207     TCGv_reg dest, t2;
3208 
3209     if (a->c) {
3210         nullify_over(ctx);
3211     }
3212 
3213     dest = dest_gpr(ctx, a->t);
3214     t2 = load_gpr(ctx, a->r2);
3215     if (a->r1 == a->r2) {
3216         TCGv_i32 t32 = tcg_temp_new_i32();
3217         tcg_gen_trunc_reg_i32(t32, t2);
3218         tcg_gen_rotri_i32(t32, t32, sa);
3219         tcg_gen_extu_i32_reg(dest, t32);
3220         tcg_temp_free_i32(t32);
3221     } else if (a->r1 == 0) {
3222         tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3223     } else {
3224         TCGv_reg t0 = tcg_temp_new();
3225         tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3226         tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
3227         tcg_temp_free(t0);
3228     }
3229     save_gpr(ctx, a->t, dest);
3230 
3231     /* Install the new nullification.  */
3232     cond_free(&ctx->null_cond);
3233     if (a->c) {
3234         ctx->null_cond = do_sed_cond(a->c, dest);
3235     }
3236     return nullify_end(ctx);
3237 }
3238 
3239 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3240 {
3241     unsigned len = 32 - a->clen;
3242     TCGv_reg dest, src, tmp;
3243 
3244     if (a->c) {
3245         nullify_over(ctx);
3246     }
3247 
3248     dest = dest_gpr(ctx, a->t);
3249     src = load_gpr(ctx, a->r);
3250     tmp = tcg_temp_new();
3251 
3252     /* Recall that SAR is using big-endian bit numbering.  */
3253     tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3254     if (a->se) {
3255         tcg_gen_sar_reg(dest, src, tmp);
3256         tcg_gen_sextract_reg(dest, dest, 0, len);
3257     } else {
3258         tcg_gen_shr_reg(dest, src, tmp);
3259         tcg_gen_extract_reg(dest, dest, 0, len);
3260     }
3261     tcg_temp_free(tmp);
3262     save_gpr(ctx, a->t, dest);
3263 
3264     /* Install the new nullification.  */
3265     cond_free(&ctx->null_cond);
3266     if (a->c) {
3267         ctx->null_cond = do_sed_cond(a->c, dest);
3268     }
3269     return nullify_end(ctx);
3270 }
3271 
3272 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3273 {
3274     unsigned len = 32 - a->clen;
3275     unsigned cpos = 31 - a->pos;
3276     TCGv_reg dest, src;
3277 
3278     if (a->c) {
3279         nullify_over(ctx);
3280     }
3281 
3282     dest = dest_gpr(ctx, a->t);
3283     src = load_gpr(ctx, a->r);
3284     if (a->se) {
3285         tcg_gen_sextract_reg(dest, src, cpos, len);
3286     } else {
3287         tcg_gen_extract_reg(dest, src, cpos, len);
3288     }
3289     save_gpr(ctx, a->t, dest);
3290 
3291     /* Install the new nullification.  */
3292     cond_free(&ctx->null_cond);
3293     if (a->c) {
3294         ctx->null_cond = do_sed_cond(a->c, dest);
3295     }
3296     return nullify_end(ctx);
3297 }
3298 
3299 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3300 {
3301     unsigned len = 32 - a->clen;
3302     target_sreg mask0, mask1;
3303     TCGv_reg dest;
3304 
3305     if (a->c) {
3306         nullify_over(ctx);
3307     }
3308     if (a->cpos + len > 32) {
3309         len = 32 - a->cpos;
3310     }
3311 
3312     dest = dest_gpr(ctx, a->t);
3313     mask0 = deposit64(0, a->cpos, len, a->i);
3314     mask1 = deposit64(-1, a->cpos, len, a->i);
3315 
3316     if (a->nz) {
3317         TCGv_reg src = load_gpr(ctx, a->t);
3318         if (mask1 != -1) {
3319             tcg_gen_andi_reg(dest, src, mask1);
3320             src = dest;
3321         }
3322         tcg_gen_ori_reg(dest, src, mask0);
3323     } else {
3324         tcg_gen_movi_reg(dest, mask0);
3325     }
3326     save_gpr(ctx, a->t, dest);
3327 
3328     /* Install the new nullification.  */
3329     cond_free(&ctx->null_cond);
3330     if (a->c) {
3331         ctx->null_cond = do_sed_cond(a->c, dest);
3332     }
3333     return nullify_end(ctx);
3334 }
3335 
3336 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3337 {
3338     unsigned rs = a->nz ? a->t : 0;
3339     unsigned len = 32 - a->clen;
3340     TCGv_reg dest, val;
3341 
3342     if (a->c) {
3343         nullify_over(ctx);
3344     }
3345     if (a->cpos + len > 32) {
3346         len = 32 - a->cpos;
3347     }
3348 
3349     dest = dest_gpr(ctx, a->t);
3350     val = load_gpr(ctx, a->r);
3351     if (rs == 0) {
3352         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3353     } else {
3354         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3355     }
3356     save_gpr(ctx, a->t, dest);
3357 
3358     /* Install the new nullification.  */
3359     cond_free(&ctx->null_cond);
3360     if (a->c) {
3361         ctx->null_cond = do_sed_cond(a->c, dest);
3362     }
3363     return nullify_end(ctx);
3364 }
3365 
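/* Deposit at a variable position.  TCG has no deposit with a runtime
   offset, so build one: mask the value to LEN bits, shift mask and
   value left by the converted SAR amount, clear those bit positions in
   the preserved register, and OR the field in.  */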
3366 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3367                         unsigned nz, unsigned clen, TCGv_reg val)
3368 {
3369     unsigned rs = nz ? rt : 0;
3370     unsigned len = 32 - clen;
3371     TCGv_reg mask, tmp, shift, dest;
3372     unsigned msb = 1U << (len - 1);
3373 
3374     dest = dest_gpr(ctx, rt);
3375     shift = tcg_temp_new();
3376     tmp = tcg_temp_new();
3377 
3378     /* Convert big-endian bit numbering in SAR to left-shift.  */
3379     tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3380 
3381     mask = tcg_const_reg(msb + (msb - 1));
3382     tcg_gen_and_reg(tmp, val, mask);
3383     if (rs) {
3384         tcg_gen_shl_reg(mask, mask, shift);
3385         tcg_gen_shl_reg(tmp, tmp, shift);
3386         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3387         tcg_gen_or_reg(dest, dest, tmp);
3388     } else {
3389         tcg_gen_shl_reg(dest, tmp, shift);
3390     }
3391     tcg_temp_free(shift);
3392     tcg_temp_free(mask);
3393     tcg_temp_free(tmp);
3394     save_gpr(ctx, rt, dest);
3395 
3396     /* Install the new nullification.  */
3397     cond_free(&ctx->null_cond);
3398     if (c) {
3399         ctx->null_cond = do_sed_cond(c, dest);
3400     }
3401     return nullify_end(ctx);
3402 }
3403 
3404 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3405 {
3406     if (a->c) {
3407         nullify_over(ctx);
3408     }
3409     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3410 }
3411 
3412 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3413 {
3414     if (a->c) {
3415         nullify_over(ctx);
3416     }
3417     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3418 }
3419 
3420 static bool trans_be(DisasContext *ctx, arg_be *a)
3421 {
3422     TCGv_reg tmp;
3423 
3424 #ifdef CONFIG_USER_ONLY
3425     /* ??? It seems like there should be a good way of using
3426        "be disp(sr2, r0)", the canonical gateway entry mechanism,
3427        to our advantage.  But that appears to be inconvenient to
3428        manage alongside branch delay slots.  Therefore we handle
3429        entry into the gateway page via absolute address.  */
3430     /* Since we don't implement spaces, just branch.  Do notice the special
3431        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3432        goto_tb to the TB containing the syscall.  */
3433     if (a->b == 0) {
3434         return do_dbranch(ctx, a->disp, a->l, a->n);
3435     }
3436 #else
3437     nullify_over(ctx);
3438 #endif
3439 
3440     tmp = get_temp(ctx);
3441     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3442     tmp = do_ibranch_priv(ctx, tmp);
3443 
3444 #ifdef CONFIG_USER_ONLY
3445     return do_ibranch(ctx, tmp, a->l, a->n);
3446 #else
3447     TCGv_i64 new_spc = tcg_temp_new_i64();
3448 
3449     load_spr(ctx, new_spc, a->sp);
3450     if (a->l) {
3451         copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3452         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3453     }
3454     if (a->n && use_nullify_skip(ctx)) {
3455         tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3456         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3457         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3458         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3459     } else {
3460         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3461         if (ctx->iaoq_b == -1) {
3462             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3463         }
3464         tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3465         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3466         nullify_set(ctx, a->n);
3467     }
3468     tcg_temp_free_i64(new_spc);
3469     tcg_gen_lookup_and_goto_ptr();
3470     ctx->base.is_jmp = DISAS_NORETURN;
3471     return nullify_end(ctx);
3472 #endif
3473 }
3474 
3475 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3476 {
3477     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3478 }
3479 
3480 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3481 {
3482     target_ureg dest = iaoq_dest(ctx, a->disp);
3483 
3484     nullify_over(ctx);
3485 
3486     /* Make sure the caller hasn't done something weird with the queue.
3487      * ??? This is not quite the same as the PSW[B] bit, which would be
3488      * expensive to track.  Real hardware will trap for
3489      *    b  gateway
3490      *    b  gateway+4  (in delay slot of first branch)
3491      * However, checking for a non-sequential instruction queue *will*
3492      * diagnose the security hole
3493      *    b  gateway
3494      *    b  evil
3495      * in which instructions at evil would run with increased privs.
3496      */
3497     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3498         return gen_illegal(ctx);
3499     }
3500 
3501 #ifndef CONFIG_USER_ONLY
3502     if (ctx->tb_flags & PSW_C) {
3503         CPUHPPAState *env = ctx->cs->env_ptr;
3504         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3505         /* If we could not find a TLB entry, then we need to generate an
3506            ITLB miss exception so the kernel will provide it.
3507            The resulting TLB fill operation will invalidate this TB and
3508            we will re-translate, at which point we *will* be able to find
3509            the TLB entry and determine if this is in fact a gateway page.  */
3510         if (type < 0) {
3511             gen_excp(ctx, EXCP_ITLB_MISS);
3512             return true;
3513         }
3514         /* No change for non-gateway pages or for priv decrease.  */
3515         if (type >= 4 && type - 4 < ctx->privilege) {
3516             dest = deposit32(dest, 0, 2, type - 4);
3517         }
3518     } else {
3519         dest &= -4;  /* priv = 0 */
3520     }
3521 #endif
3522 
3523     if (a->l) {
3524         TCGv_reg tmp = dest_gpr(ctx, a->l);
3525         if (ctx->privilege < 3) {
3526             tcg_gen_andi_reg(tmp, tmp, -4);
3527         }
3528         tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3529         save_gpr(ctx, a->l, tmp);
3530     }
3531 
3532     return do_dbranch(ctx, dest, 0, a->n);
3533 }
3534 
3535 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3536 {
3537     if (a->x) {
3538         TCGv_reg tmp = get_temp(ctx);
3539         tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3540         tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3541         /* The computation here never changes privilege level.  */
3542         return do_ibranch(ctx, tmp, a->l, a->n);
3543     } else {
3544         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3545         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3546     }
3547 }
3548 
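/* Branch vectored (BV): branch to GR[b] + 8 * GR[x].  With x == r0 this
   is the plain register-indirect branch used for procedure returns.  */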
3549 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3550 {
3551     TCGv_reg dest;
3552 
3553     if (a->x == 0) {
3554         dest = load_gpr(ctx, a->b);
3555     } else {
3556         dest = get_temp(ctx);
3557         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3558         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3559     }
3560     dest = do_ibranch_priv(ctx, dest);
3561     return do_ibranch(ctx, dest, 0, a->n);
3562 }
3563 
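/* Branch vectored external (BVE).  Unlike BE, the new space is not an
   SR operand but is derived from the branch address via space_select.  */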
3564 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3565 {
3566     TCGv_reg dest;
3567 
3568 #ifdef CONFIG_USER_ONLY
3569     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3570     return do_ibranch(ctx, dest, a->l, a->n);
3571 #else
3572     nullify_over(ctx);
3573     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3574 
3575     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3576     if (ctx->iaoq_b == -1) {
3577         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3578     }
3579     copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3580     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3581     if (a->l) {
3582         copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3583     }
3584     nullify_set(ctx, a->n);
3585     tcg_gen_lookup_and_goto_ptr();
3586     ctx->base.is_jmp = DISAS_NORETURN;
3587     return nullify_end(ctx);
3588 #endif
3589 }
3590 
3591 /*
3592  * Float class 0
3593  */
3594 
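/* A note on the do_fop_* helper names used below: the suffix letters
   give the operand types in order -- 'w' a 32-bit word (single
   precision), 'd' a 64-bit doubleword (double precision), 'e' the env
   pointer.  Thus do_fop_wew wraps a word := f(env, word) operation,
   and do_fop_ded a doubleword := f(env, doubleword) one.  */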
3595 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3596 {
3597     tcg_gen_mov_i32(dst, src);
3598 }
3599 
3600 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3601 {
3602     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3603 }
3604 
3605 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3606 {
3607     tcg_gen_mov_i64(dst, src);
3608 }
3609 
3610 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3611 {
3612     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3613 }
3614 
3615 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3616 {
3617     tcg_gen_andi_i32(dst, src, INT32_MAX);
3618 }
3619 
3620 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3621 {
3622     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3623 }
3624 
3625 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3626 {
3627     tcg_gen_andi_i64(dst, src, INT64_MAX);
3628 }
3629 
3630 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3631 {
3632     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3633 }
3634 
3635 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3636 {
3637     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3638 }
3639 
3640 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3641 {
3642     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3643 }
3644 
3645 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3646 {
3647     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3648 }
3649 
3650 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3651 {
3652     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3653 }
3654 
3655 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3656 {
3657     tcg_gen_xori_i32(dst, src, INT32_MIN);
3658 }
3659 
3660 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3661 {
3662     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3663 }
3664 
3665 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3666 {
3667     tcg_gen_xori_i64(dst, src, INT64_MIN);
3668 }
3669 
3670 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3671 {
3672     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3673 }
3674 
3675 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3676 {
3677     tcg_gen_ori_i32(dst, src, INT32_MIN);
3678 }
3679 
3680 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3681 {
3682     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3683 }
3684 
3685 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3686 {
3687     tcg_gen_ori_i64(dst, src, INT64_MIN);
3688 }
3689 
3690 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3691 {
3692     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3693 }
3694 
3695 /*
3696  * Float class 1
3697  */
3698 
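/* In the trans_fcnv_* names, 'f' and 'd' are single and double
   precision floats, 'w' and 'q' signed 32-bit and 64-bit integers
   (the latter implemented by the _dw helpers), 'u' marks the unsigned
   forms, and '_t_' selects the truncating (round-toward-zero)
   float-to-integer variants.  */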
3699 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3700 {
3701     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3702 }
3703 
3704 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3705 {
3706     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3707 }
3708 
3709 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3710 {
3711     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3712 }
3713 
3714 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3715 {
3716     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3717 }
3718 
3719 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3720 {
3721     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3722 }
3723 
3724 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3725 {
3726     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3727 }
3728 
3729 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3730 {
3731     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3732 }
3733 
3734 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3735 {
3736     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3737 }
3738 
3739 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3740 {
3741     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3742 }
3743 
3744 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3745 {
3746     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3747 }
3748 
3749 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3750 {
3751     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3752 }
3753 
3754 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3755 {
3756     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3757 }
3758 
3759 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3760 {
3761     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3762 }
3763 
3764 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3765 {
3766     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3767 }
3768 
3769 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3770 {
3771     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3772 }
3773 
3774 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3775 {
3776     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3777 }
3778 
3779 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3780 {
3781     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3782 }
3783 
3784 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3785 {
3786     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3787 }
3788 
3789 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3790 {
3791     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3792 }
3793 
3794 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3795 {
3796     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3797 }
3798 
3799 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3800 {
3801     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3802 }
3803 
3804 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3805 {
3806     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3807 }
3808 
3809 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3810 {
3811     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3812 }
3813 
3814 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3815 {
3816     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3817 }
3818 
3819 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3820 {
3821     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3822 }
3823 
3824 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3825 {
3826     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3827 }
3828 
3829 /*
3830  * Float class 2
3831  */
3832 
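/* The floating-point compares do not produce a value here; they update
   the status flags kept in fr0_shadow, which FTEST below inspects.  */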
3833 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3834 {
3835     TCGv_i32 ta, tb, tc, ty;
3836 
3837     nullify_over(ctx);
3838 
3839     ta = load_frw0_i32(a->r1);
3840     tb = load_frw0_i32(a->r2);
3841     ty = tcg_constant_i32(a->y);
3842     tc = tcg_constant_i32(a->c);
3843 
3844     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3845 
3846     tcg_temp_free_i32(ta);
3847     tcg_temp_free_i32(tb);
3848 
3849     return nullify_end(ctx);
3850 }
3851 
3852 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3853 {
3854     TCGv_i64 ta, tb;
3855     TCGv_i32 tc, ty;
3856 
3857     nullify_over(ctx);
3858 
3859     ta = load_frd0(a->r1);
3860     tb = load_frd0(a->r2);
3861     ty = tcg_constant_i32(a->y);
3862     tc = tcg_constant_i32(a->c);
3863 
3864     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3865 
3866     tcg_temp_free_i64(ta);
3867     tcg_temp_free_i64(tb);
3868 
3869     return nullify_end(ctx);
3870 }
3871 
3872 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3873 {
3874     TCGv_reg t;
3875 
3876     nullify_over(ctx);
3877 
3878     t = get_temp(ctx);
3879     tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3880 
3881     if (a->y == 1) {
3882         int mask;
3883         bool inv = false;
3884 
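        /* These masks assume the layout fcmp maintains in fr0_shadow:
           bit 26 is the C (compare result) bit and bits 21..11 hold the
           compare queue, so each acc/rej variant selects C plus
           progressively fewer queue entries.  */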
3885         switch (a->c) {
3886         case 0: /* simple */
3887             tcg_gen_andi_reg(t, t, 0x4000000);
3888             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3889             goto done;
3890         case 2: /* rej */
3891             inv = true;
3892             /* fallthru */
3893         case 1: /* acc */
3894             mask = 0x43ff800;
3895             break;
3896         case 6: /* rej8 */
3897             inv = true;
3898             /* fallthru */
3899         case 5: /* acc8 */
3900             mask = 0x43f8000;
3901             break;
3902         case 9: /* acc6 */
3903             mask = 0x43e0000;
3904             break;
3905         case 13: /* acc4 */
3906             mask = 0x4380000;
3907             break;
3908         case 17: /* acc2 */
3909             mask = 0x4200000;
3910             break;
3911         default:
3912             gen_illegal(ctx);
3913             return true;
3914         }
3915         if (inv) {
3916             TCGv_reg c = load_const(ctx, mask);
3917             tcg_gen_or_reg(t, t, c);
3918             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3919         } else {
3920             tcg_gen_andi_reg(t, t, mask);
3921             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3922         }
3923     } else {
3924         unsigned cbit = (a->y ^ 1) - 1;
3925 
3926         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3927         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3928         tcg_temp_free(t);
3929     }
3930 
3931  done:
3932     return nullify_end(ctx);
3933 }
3934 
3935 /*
3936  * Float class 3
3937  */
3938 
3939 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3940 {
3941     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3942 }
3943 
3944 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3945 {
3946     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3947 }
3948 
3949 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3950 {
3951     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3952 }
3953 
3954 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3955 {
3956     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3957 }
3958 
3959 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3960 {
3961     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3962 }
3963 
3964 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3965 {
3966     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3967 }
3968 
3969 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3970 {
3971     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3972 }
3973 
3974 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3975 {
3976     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3977 }
3978 
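/* XMPYU: unsigned 32x32 -> 64-bit multiply in the FPU.  load_frw0_i64
   zero-extends the 32-bit operands, so one 64-bit multiplication
   yields the full unsigned product.  */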
3979 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3980 {
3981     TCGv_i64 x, y;
3982 
3983     nullify_over(ctx);
3984 
3985     x = load_frw0_i64(a->r1);
3986     y = load_frw0_i64(a->r2);
3987     tcg_gen_mul_i64(x, x, y);
3988     save_frd(a->t, x);
3989     tcg_temp_free_i64(x);
3990     tcg_temp_free_i64(y);
3991 
3992     return nullify_end(ctx);
3993 }
3994 
3995 /* Convert the fmpyadd single-precision register encodings to standard.  */
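/* The 5-bit field reaches only fr16..fr31: bit 4 of the encoding picks
   a word half, which corresponds to bit 5 of the internal
   single-precision register numbering used by do_fop_weww.  */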
3996 static inline int fmpyadd_s_reg(unsigned r)
3997 {
3998     return (r & 16) * 2 + 16 + (r & 15);
3999 }
4000 
4001 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4002 {
4003     int tm = fmpyadd_s_reg(a->tm);
4004     int ra = fmpyadd_s_reg(a->ra);
4005     int ta = fmpyadd_s_reg(a->ta);
4006     int rm2 = fmpyadd_s_reg(a->rm2);
4007     int rm1 = fmpyadd_s_reg(a->rm1);
4008 
4009     nullify_over(ctx);
4010 
4011     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4012     do_fop_weww(ctx, ta, ta, ra,
4013                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4014 
4015     return nullify_end(ctx);
4016 }
4017 
4018 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4019 {
4020     return do_fmpyadd_s(ctx, a, false);
4021 }
4022 
4023 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4024 {
4025     return do_fmpyadd_s(ctx, a, true);
4026 }
4027 
4028 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4029 {
4030     nullify_over(ctx);
4031 
4032     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4033     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4034                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4035 
4036     return nullify_end(ctx);
4037 }
4038 
4039 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4040 {
4041     return do_fmpyadd_d(ctx, a, false);
4042 }
4043 
4044 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4045 {
4046     return do_fmpyadd_d(ctx, a, true);
4047 }
4048 
4049 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4050 {
4051     TCGv_i32 x, y, z;
4052 
4053     nullify_over(ctx);
4054     x = load_frw0_i32(a->rm1);
4055     y = load_frw0_i32(a->rm2);
4056     z = load_frw0_i32(a->ra3);
4057 
4058     if (a->neg) {
4059         gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4060     } else {
4061         gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4062     }
4063 
4064     tcg_temp_free_i32(y);
4065     tcg_temp_free_i32(z);
4066     save_frw_i32(a->t, x);
4067     tcg_temp_free_i32(x);
4068     return nullify_end(ctx);
4069 }
4070 
4071 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4072 {
4073     TCGv_i64 x, y, z;
4074 
4075     nullify_over(ctx);
4076     x = load_frd0(a->rm1);
4077     y = load_frd0(a->rm2);
4078     z = load_frd0(a->ra3);
4079 
4080     if (a->neg) {
4081         gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4082     } else {
4083         gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4084     }
4085 
4086     tcg_temp_free_i64(y);
4087     tcg_temp_free_i64(z);
4088     save_frd(a->t, x);
4089     tcg_temp_free_i64(x);
4090     return nullify_end(ctx);
4091 }
4092 
4093 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4094 {
4095     qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4096     cond_free(&ctx->null_cond);
4097     return true;
4098 }
4099 
4100 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4101 {
4102     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4103     int bound;
4104 
4105     ctx->cs = cs;
4106     ctx->tb_flags = ctx->base.tb->flags;
4107 
4108 #ifdef CONFIG_USER_ONLY
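    /* The low two bits of an IAOQ value carry the privilege level,
       which for user-only is always 3 (== MMU_USER_IDX).  */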
4109     ctx->privilege = MMU_USER_IDX;
4110     ctx->mmu_idx = MMU_USER_IDX;
4111     ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4112     ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4113 #else
4114     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4115     ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4116 
4117     /* Recover the IAOQ values from the GVA + PRIV.  */
4118     uint64_t cs_base = ctx->base.tb->cs_base;
4119     uint64_t iasq_f = cs_base & ~0xffffffffull;
4120     int32_t diff = cs_base;
4121 
4122     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4123     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4124 #endif
4125     ctx->iaoq_n = -1;
4126     ctx->iaoq_n_var = NULL;
4127 
4128     /* Bound the number of instructions by those left on the page.  */
4129     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4130     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4131 
4132     ctx->ntempr = 0;
4133     ctx->ntempl = 0;
4134     memset(ctx->tempr, 0, sizeof(ctx->tempr));
4135     memset(ctx->templ, 0, sizeof(ctx->templ));
4136 }
4137 
4138 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4139 {
4140     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4141 
4142     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4143     ctx->null_cond = cond_make_f();
4144     ctx->psw_n_nonzero = false;
4145     if (ctx->tb_flags & PSW_N) {
4146         ctx->null_cond.c = TCG_COND_ALWAYS;
4147         ctx->psw_n_nonzero = true;
4148     }
4149     ctx->null_lab = NULL;
4150 }
4151 
4152 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4153 {
4154     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4155 
4156     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4157 }
4158 
4159 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4160 {
4161     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4162     CPUHPPAState *env = cs->env_ptr;
4163     DisasJumpType ret;
4164     int i, n;
4165 
4166     /* Execute one insn.  */
4167 #ifdef CONFIG_USER_ONLY
4168     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4169         do_page_zero(ctx);
4170         ret = ctx->base.is_jmp;
4171         assert(ret != DISAS_NEXT);
4172     } else
4173 #endif
4174     {
4175         /* Always fetch the insn, even if nullified, so that we check
4176            the page permissions for execute.  */
4177         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4178 
4179         /* Set up the IA queue for the next insn.
4180            This will be overwritten by a branch.  */
4181         if (ctx->iaoq_b == -1) {
4182             ctx->iaoq_n = -1;
4183             ctx->iaoq_n_var = get_temp(ctx);
4184             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4185         } else {
4186             ctx->iaoq_n = ctx->iaoq_b + 4;
4187             ctx->iaoq_n_var = NULL;
4188         }
4189 
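        /* If the insn is statically known to be nullified, consume the
           nullification and skip decode entirely; the fetch above has
           already performed the execute permission check.  */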
4190         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4191             ctx->null_cond.c = TCG_COND_NEVER;
4192             ret = DISAS_NEXT;
4193         } else {
4194             ctx->insn = insn;
4195             if (!decode(ctx, insn)) {
4196                 gen_illegal(ctx);
4197             }
4198             ret = ctx->base.is_jmp;
4199             assert(ctx->null_lab == NULL);
4200         }
4201     }
4202 
4203     /* Free any temporaries allocated.  */
4204     for (i = 0, n = ctx->ntempr; i < n; ++i) {
4205         tcg_temp_free(ctx->tempr[i]);
4206         ctx->tempr[i] = NULL;
4207     }
4208     for (i = 0, n = ctx->ntempl; i < n; ++i) {
4209         tcg_temp_free_tl(ctx->templ[i]);
4210         ctx->templ[i] = NULL;
4211     }
4212     ctx->ntempr = 0;
4213     ctx->ntempl = 0;
4214 
4215     /* Advance the insn queue.  Note that this check also detects
4216        a privilege change within the instruction queue.  */
4217     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4218         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4219             && use_goto_tb(ctx, ctx->iaoq_b)
4220             && (ctx->null_cond.c == TCG_COND_NEVER
4221                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4222             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4223             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4224             ctx->base.is_jmp = ret = DISAS_NORETURN;
4225         } else {
4226             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4227         }
4228     }
4229     ctx->iaoq_f = ctx->iaoq_b;
4230     ctx->iaoq_b = ctx->iaoq_n;
4231     ctx->base.pc_next += 4;
4232 
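    /* If the new front of the queue has become symbolic (-1), the IAOQ
       state can no longer be tracked as constants and must be written
       back to the TCG globals.  */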
4233     switch (ret) {
4234     case DISAS_NORETURN:
4235     case DISAS_IAQ_N_UPDATED:
4236         break;
4237 
4238     case DISAS_NEXT:
4239     case DISAS_IAQ_N_STALE:
4240     case DISAS_IAQ_N_STALE_EXIT:
4241         if (ctx->iaoq_f == -1) {
4242             tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4243             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4244 #ifndef CONFIG_USER_ONLY
4245             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4246 #endif
4247             nullify_save(ctx);
4248             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4249                                 ? DISAS_EXIT
4250                                 : DISAS_IAQ_N_UPDATED);
4251         } else if (ctx->iaoq_b == -1) {
4252             tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4253         }
4254         break;
4255 
4256     default:
4257         g_assert_not_reached();
4258     }
4259 }
4260 
4261 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4262 {
4263     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4264     DisasJumpType is_jmp = ctx->base.is_jmp;
4265 
4266     switch (is_jmp) {
4267     case DISAS_NORETURN:
4268         break;
4269     case DISAS_TOO_MANY:
4270     case DISAS_IAQ_N_STALE:
4271     case DISAS_IAQ_N_STALE_EXIT:
4272         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4273         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4274         nullify_save(ctx);
4275         /* FALLTHRU */
4276     case DISAS_IAQ_N_UPDATED:
4277         if (ctx->base.singlestep_enabled) {
4278             gen_excp_1(EXCP_DEBUG);
4279         } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4280             tcg_gen_lookup_and_goto_ptr();
4281         }
4282         /* FALLTHRU */
4283     case DISAS_EXIT:
4284         tcg_gen_exit_tb(NULL, 0);
4285         break;
4286     default:
4287         g_assert_not_reached();
4288     }
4289 }
4290 
4291 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4292 {
4293     target_ulong pc = dcbase->pc_first;
4294 
4295 #ifdef CONFIG_USER_ONLY
4296     switch (pc) {
4297     case 0x00:
4298         qemu_log("IN:\n0x00000000:  (null)\n");
4299         return;
4300     case 0xb0:
4301         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4302         return;
4303     case 0xe0:
4304         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4305         return;
4306     case 0x100:
4307         qemu_log("IN:\n0x00000100:  syscall\n");
4308         return;
4309     }
4310 #endif
4311 
4312     qemu_log("IN: %s\n", lookup_symbol(pc));
4313     log_target_disas(cs, pc, dcbase->tb->size);
4314 }
4315 
4316 static const TranslatorOps hppa_tr_ops = {
4317     .init_disas_context = hppa_tr_init_disas_context,
4318     .tb_start           = hppa_tr_tb_start,
4319     .insn_start         = hppa_tr_insn_start,
4320     .translate_insn     = hppa_tr_translate_insn,
4321     .tb_stop            = hppa_tr_tb_stop,
4322     .disas_log          = hppa_tr_disas_log,
4323 };
4324 
4325 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
4326 {
4327     DisasContext ctx;
4328     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
4329 }
4330 
4331 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4332                           target_ulong *data)
4333 {
4334     env->iaoq_f = data[0];
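    /* A value of -1 for data[1] means iaoq_b was not constant at
       translation time (cf. hppa_tr_insn_start), so leave env->iaoq_b
       unchanged.  */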
4335     if (data[1] != (target_ureg)-1) {
4336         env->iaoq_b = data[1];
4337     }
4338     /* Since we were executing the instruction at IAOQ_F, and took some
4339        sort of action that provoked the cpu_restore_state, we can infer
4340        that the instruction was not nullified.  */
4341     env->psw_n = 0;
4342 }
4343