/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif
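
/* E.g., for 32-bit system-mode emulation, registers are 32 bits wide
   (TCGv_reg == TCGv_i32) while the combined space:offset global virtual
   address is 64 bits wide (TCGv_tl == TCGv_i64), which is why
   tcg_gen_extu_reg_tl zero-extends; see form_gva below.  */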

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;
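
/* A computed condition: "true" when A0 <c> A1 holds, with TCG_COND_NEVER
   and TCG_COND_ALWAYS as the constant cases.  Used both for branch/trap
   conditions and, as ctx->null_cond, for the condition under which an
   insn is nullified.  */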

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted: a negative value tells the
   translator that 0 really means sr0, rather than a space inferred from
   the base register (see space_select).  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
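
/* That is: M=0 -> 0 (no base update); M=1,A=0 -> 1 (post-modify);
   M=1,A=1 -> -1 (pre-modify), matching the convention described at
   do_load_32 below.  */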

/* Convert the sign of the displacement to a pre- or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have already
   updated the iaq, so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not TCG globals, so that we can index them at runtime
       (see space_select).  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
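
/* Each 64-bit fr[0..31] holds two 32-bit singles: the left (high) half
   for RT < 32 and the right (low) half for RT >= 32, i.e. bit 5 of RT
   selects the half, as seen in load_frw_i32 below.  */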

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

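/* Copy an instruction-address-queue entry to DEST.  IVAL == -1 is a
   sentinel meaning the value is not known at translation time and must
   be taken from the TCG variable VVAL instead.  */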
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

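/* Continue to the next TB: either chain directly via goto_tb, when both
   queue values are known constants and the front target qualifies, or
   fall back to an indirect lookup-and-goto.  */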
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
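        /*
         * Simplify, with C held as 0/1 in cb_msb:
         *   !C | Z
         *   (-C & res) == 0
         * since -0 is all-zeros and -1 is all-ones.
         */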
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (never, =, <), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
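        /* Per-bit carry-out: cout = (in1 & in2) | ((in1 | in2) & ~res),
           which is what the four operations below compute into CB.  */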
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
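/* Overflow iff the inputs have the same sign and the result's sign
   differs: the sign bit of (res ^ in1) & ~(in1 ^ in2) is V.  */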
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
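/* Overflow iff the inputs have different signs and the result's sign
   differs from in1: the sign bit of (res ^ in1) & (in1 ^ in2) is V.  */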
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

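/* Shared implementation of the add insns: SHIFT pre-shifts IN1 (e.g.
   for the shift-and-add insns), IS_L marks a "logical" add that skips
   the carry writeback, IS_C adds the carry-in, and IS_TSV / IS_TC trap
   on signed overflow / on the computed condition.  */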
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

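/* Shared implementation of the sub insns: IS_B includes the borrow
   (subtract-with-borrow), and IS_TSV / IS_TC trap on signed overflow /
   on the computed condition.  */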
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

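    /* Select sr[4 + (top two bits of base)]: shift those bits down to
       bit 3 and mask (yielding top2 * sizeof(uint64_t)), then use the
       result as a byte offset from sr[4].  */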
    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

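/* Form the offset (in *pofs, for a possible base-register update) and
   the global virtual address (in *pgva).  For system mode the gva is
   the zero-extended offset OR'd with the space (as stored, already
   shifted into place in the high bits), with the offset first masked
   to 62 bits when PSW[W] is set.  */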
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
1477     if (modify) {
1478         save_gpr(ctx, rb, ofs);
1479     }
1480 }
1481 
1482 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1483                        unsigned rx, int scale, target_sreg disp,
1484                        unsigned sp, int modify, MemOp mop)
1485 {
1486     TCGv_reg ofs;
1487     TCGv_tl addr;
1488 
1489     /* Caller uses nullify_over/nullify_end.  */
1490     assert(ctx->null_cond.c == TCG_COND_NEVER);
1491 
1492     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1493              ctx->mmu_idx == MMU_PHYS_IDX);
1494     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
1495     if (modify) {
1496         save_gpr(ctx, rb, ofs);
1497     }
1498 }
1499 
1500 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1501                         unsigned rx, int scale, target_sreg disp,
1502                         unsigned sp, int modify, MemOp mop)
1503 {
1504     TCGv_reg ofs;
1505     TCGv_tl addr;
1506 
1507     /* Caller uses nullify_over/nullify_end.  */
1508     assert(ctx->null_cond.c == TCG_COND_NEVER);
1509 
1510     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1511              ctx->mmu_idx == MMU_PHYS_IDX);
1512     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
1513     if (modify) {
1514         save_gpr(ctx, rb, ofs);
1515     }
1516 }
1517 
1518 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1519                         unsigned rx, int scale, target_sreg disp,
1520                         unsigned sp, int modify, MemOp mop)
1521 {
1522     TCGv_reg ofs;
1523     TCGv_tl addr;
1524 
1525     /* Caller uses nullify_over/nullify_end.  */
1526     assert(ctx->null_cond.c == TCG_COND_NEVER);
1527 
1528     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1529              ctx->mmu_idx == MMU_PHYS_IDX);
1530     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
1531     if (modify) {
1532         save_gpr(ctx, rb, ofs);
1533     }
1534 }
1535 
1536 #if TARGET_REGISTER_BITS == 64
1537 #define do_load_reg   do_load_64
1538 #define do_store_reg  do_store_64
1539 #else
1540 #define do_load_reg   do_load_32
1541 #define do_store_reg  do_store_32
1542 #endif
1543 
1544 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1545                     unsigned rx, int scale, target_sreg disp,
1546                     unsigned sp, int modify, MemOp mop)
1547 {
1548     TCGv_reg dest;
1549 
1550     nullify_over(ctx);
1551 
1552     if (modify == 0) {
1553         /* No base register update.  */
1554         dest = dest_gpr(ctx, rt);
1555     } else {
1556         /* Make sure if RT == RB, we see the result of the load.  */
1557         dest = get_temp(ctx);
1558     }
1559     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1560     save_gpr(ctx, rt, dest);
1561 
1562     return nullify_end(ctx);
1563 }
1564 
1565 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1566                       unsigned rx, int scale, target_sreg disp,
1567                       unsigned sp, int modify)
1568 {
1569     TCGv_i32 tmp;
1570 
1571     nullify_over(ctx);
1572 
1573     tmp = tcg_temp_new_i32();
1574     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1575     save_frw_i32(rt, tmp);
1576     tcg_temp_free_i32(tmp);
1577 
1578     if (rt == 0) {
1579         gen_helper_loaded_fr0(cpu_env);
1580     }
1581 
1582     return nullify_end(ctx);
1583 }
1584 
1585 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1586 {
1587     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1588                      a->disp, a->sp, a->m);
1589 }
1590 
1591 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1592                       unsigned rx, int scale, target_sreg disp,
1593                       unsigned sp, int modify)
1594 {
1595     TCGv_i64 tmp;
1596 
1597     nullify_over(ctx);
1598 
1599     tmp = tcg_temp_new_i64();
1600     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1601     save_frd(rt, tmp);
1602     tcg_temp_free_i64(tmp);
1603 
1604     if (rt == 0) {
1605         gen_helper_loaded_fr0(cpu_env);
1606     }
1607 
1608     return nullify_end(ctx);
1609 }
1610 
1611 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1612 {
1613     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1614                      a->disp, a->sp, a->m);
1615 }
1616 
1617 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1618                      target_sreg disp, unsigned sp,
1619                      int modify, MemOp mop)
1620 {
1621     nullify_over(ctx);
1622     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1623     return nullify_end(ctx);
1624 }
1625 
1626 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1627                        unsigned rx, int scale, target_sreg disp,
1628                        unsigned sp, int modify)
1629 {
1630     TCGv_i32 tmp;
1631 
1632     nullify_over(ctx);
1633 
1634     tmp = load_frw_i32(rt);
1635     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1636     tcg_temp_free_i32(tmp);
1637 
1638     return nullify_end(ctx);
1639 }
1640 
1641 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1642 {
1643     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1644                       a->disp, a->sp, a->m);
1645 }
1646 
1647 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1648                        unsigned rx, int scale, target_sreg disp,
1649                        unsigned sp, int modify)
1650 {
1651     TCGv_i64 tmp;
1652 
1653     nullify_over(ctx);
1654 
1655     tmp = load_frd(rt);
1656     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1657     tcg_temp_free_i64(tmp);
1658 
1659     return nullify_end(ctx);
1660 }
1661 
1662 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1663 {
1664     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1665                       a->disp, a->sp, a->m);
1666 }
1667 
1668 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1669                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1670 {
1671     TCGv_i32 tmp;
1672 
1673     nullify_over(ctx);
1674     tmp = load_frw0_i32(ra);
1675 
1676     func(tmp, cpu_env, tmp);
1677 
1678     save_frw_i32(rt, tmp);
1679     tcg_temp_free_i32(tmp);
1680     return nullify_end(ctx);
1681 }
1682 
1683 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1684                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1685 {
1686     TCGv_i32 dst;
1687     TCGv_i64 src;
1688 
1689     nullify_over(ctx);
1690     src = load_frd(ra);
1691     dst = tcg_temp_new_i32();
1692 
1693     func(dst, cpu_env, src);
1694 
1695     tcg_temp_free_i64(src);
1696     save_frw_i32(rt, dst);
1697     tcg_temp_free_i32(dst);
1698     return nullify_end(ctx);
1699 }
1700 
1701 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1702                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1703 {
1704     TCGv_i64 tmp;
1705 
1706     nullify_over(ctx);
1707     tmp = load_frd0(ra);
1708 
1709     func(tmp, cpu_env, tmp);
1710 
1711     save_frd(rt, tmp);
1712     tcg_temp_free_i64(tmp);
1713     return nullify_end(ctx);
1714 }
1715 
1716 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1717                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1718 {
1719     TCGv_i32 src;
1720     TCGv_i64 dst;
1721 
1722     nullify_over(ctx);
1723     src = load_frw0_i32(ra);
1724     dst = tcg_temp_new_i64();
1725 
1726     func(dst, cpu_env, src);
1727 
1728     tcg_temp_free_i32(src);
1729     save_frd(rt, dst);
1730     tcg_temp_free_i64(dst);
1731     return nullify_end(ctx);
1732 }
1733 
1734 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1735                         unsigned ra, unsigned rb,
1736                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1737 {
1738     TCGv_i32 a, b;
1739 
1740     nullify_over(ctx);
1741     a = load_frw0_i32(ra);
1742     b = load_frw0_i32(rb);
1743 
1744     func(a, cpu_env, a, b);
1745 
1746     tcg_temp_free_i32(b);
1747     save_frw_i32(rt, a);
1748     tcg_temp_free_i32(a);
1749     return nullify_end(ctx);
1750 }
1751 
1752 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1753                         unsigned ra, unsigned rb,
1754                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1755 {
1756     TCGv_i64 a, b;
1757 
1758     nullify_over(ctx);
1759     a = load_frd0(ra);
1760     b = load_frd0(rb);
1761 
1762     func(a, cpu_env, a, b);
1763 
1764     tcg_temp_free_i64(b);
1765     save_frd(rt, a);
1766     tcg_temp_free_i64(a);
1767     return nullify_end(ctx);
1768 }
1769 
1770 /* Emit an unconditional branch to a direct target, which may or may not
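/* Branch helpers.  Recall the PA-RISC model: the instruction address
   queue holds the current (front, iaoq_f) and next (back, iaoq_b)
   instructions, so a taken branch installs its target behind the
   already-queued delay-slot insn.  The ,n completer ("is_n" below)
   nullifies that delay-slot insn instead of executing it.  */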
1771    have already had nullification handled.  */
1772 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1773                        unsigned link, bool is_n)
1774 {
1775     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1776         if (link != 0) {
1777             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1778         }
1779         ctx->iaoq_n = dest;
1780         if (is_n) {
1781             ctx->null_cond.c = TCG_COND_ALWAYS;
1782         }
1783     } else {
1784         nullify_over(ctx);
1785 
1786         if (link != 0) {
1787             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1788         }
1789 
1790         if (is_n && use_nullify_skip(ctx)) {
1791             nullify_set(ctx, 0);
1792             gen_goto_tb(ctx, 0, dest, dest + 4);
1793         } else {
1794             nullify_set(ctx, is_n);
1795             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1796         }
1797 
1798         nullify_end(ctx);
1799 
1800         nullify_set(ctx, 0);
1801         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1802         ctx->base.is_jmp = DISAS_NORETURN;
1803     }
1804     return true;
1805 }
1806 
1807 /* Emit a conditional branch to a direct target.  If the branch itself
1808    is nullified, we should have already used nullify_over.  */
1809 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1810                        DisasCond *cond)
1811 {
1812     target_ureg dest = iaoq_dest(ctx, disp);
1813     TCGLabel *taken = NULL;
1814     TCGCond c = cond->c;
1815     bool n;
1816 
1817     assert(ctx->null_cond.c == TCG_COND_NEVER);
1818 
1819     /* Handle TRUE and NEVER as direct branches.  */
1820     if (c == TCG_COND_ALWAYS) {
1821         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1822     }
1823     if (c == TCG_COND_NEVER) {
1824         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1825     }
1826 
1827     taken = gen_new_label();
1828     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1829     cond_free(cond);
1830 
1831     /* Not taken: Condition not satisfied; nullify on backward branches. */
1832     n = is_n && disp < 0;
1833     if (n && use_nullify_skip(ctx)) {
1834         nullify_set(ctx, 0);
1835         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1836     } else {
1837         if (!n && ctx->null_lab) {
1838             gen_set_label(ctx->null_lab);
1839             ctx->null_lab = NULL;
1840         }
1841         nullify_set(ctx, n);
1842         if (ctx->iaoq_n == -1) {
1843             /* The temporary iaoq_n_var died at the branch above.
1844                Regenerate it here instead of saving it.  */
1845             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1846         }
1847         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1848     }
1849 
1850     gen_set_label(taken);
1851 
1852     /* Taken: Condition satisfied; nullify on forward branches.  */
1853     n = is_n && disp >= 0;
1854     if (n && use_nullify_skip(ctx)) {
1855         nullify_set(ctx, 0);
1856         gen_goto_tb(ctx, 1, dest, dest + 4);
1857     } else {
1858         nullify_set(ctx, n);
1859         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1860     }
1861 
1862     /* Not taken: the branch itself was nullified.  */
1863     if (ctx->null_lab) {
1864         gen_set_label(ctx->null_lab);
1865         ctx->null_lab = NULL;
1866         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1867     } else {
1868         ctx->base.is_jmp = DISAS_NORETURN;
1869     }
1870     return true;
1871 }
1872 
1873 /* Emit an unconditional branch to an indirect target.  This handles
1874    nullification of the branch itself.  */
1875 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1876                        unsigned link, bool is_n)
1877 {
1878     TCGv_reg a0, a1, next, tmp;
1879     TCGCond c;
1880 
1881     assert(ctx->null_lab == NULL);
1882 
1883     if (ctx->null_cond.c == TCG_COND_NEVER) {
1884         if (link != 0) {
1885             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1886         }
1887         next = get_temp(ctx);
1888         tcg_gen_mov_reg(next, dest);
1889         if (is_n) {
1890             if (use_nullify_skip(ctx)) {
1891                 tcg_gen_mov_reg(cpu_iaoq_f, next);
1892                 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1893                 nullify_set(ctx, 0);
1894                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1895                 return true;
1896             }
1897             ctx->null_cond.c = TCG_COND_ALWAYS;
1898         }
1899         ctx->iaoq_n = -1;
1900         ctx->iaoq_n_var = next;
1901     } else if (is_n && use_nullify_skip(ctx)) {
1902         /* The (conditional) branch, B, nullifies the next insn, N,
1903            and we're allowed to skip execution of N (no single-step or
1904            tracepoint in effect).  Since the goto_ptr that we must use
1905            for the indirect branch consumes no special resources, we
1906            can (conditionally) skip B and continue execution.  */
1907         /* The use_nullify_skip test implies we have a known control path.  */
1908         tcg_debug_assert(ctx->iaoq_b != -1);
1909         tcg_debug_assert(ctx->iaoq_n != -1);
1910 
1911         /* We do have to handle the non-local temporary, DEST, before
1912            branching.  Since IAOQ_F is not really live at this point, we
1913            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1914         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1915         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1916 
1917         nullify_over(ctx);
1918         if (link != 0) {
1919             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1920         }
1921         tcg_gen_lookup_and_goto_ptr();
1922         return nullify_end(ctx);
1923     } else {
1924         c = ctx->null_cond.c;
1925         a0 = ctx->null_cond.a0;
1926         a1 = ctx->null_cond.a1;
1927 
1928         tmp = tcg_temp_new();
1929         next = get_temp(ctx);
1930 
1931         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1932         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1933         ctx->iaoq_n = -1;
1934         ctx->iaoq_n_var = next;
1935 
1936         if (link != 0) {
1937             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1938         }
1939 
1940         if (is_n) {
1941             /* The branch nullifies the next insn, which means the state of N
1942                after the branch is the inverse of the state of N that applied
1943                to the branch.  */
1944             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1945             cond_free(&ctx->null_cond);
1946             ctx->null_cond = cond_make_n();
1947             ctx->psw_n_nonzero = true;
1948         } else {
1949             cond_free(&ctx->null_cond);
1950         }
1951     }
1952     return true;
1953 }
1954 
1955 /* Implement
1956  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1957  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1958  *    else
1959  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1960  * which keeps the privilege level from being increased.
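 * Note that lower PL numbers are more privileged: e.g. at PL 2, a
 * target whose low bits request PL 0 or 1 is clamped back to PL 2,
 * while a request for PL 3 is allowed through unchanged.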
1961  */
1962 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1963 {
1964     TCGv_reg dest;
1965     switch (ctx->privilege) {
1966     case 0:
1967         /* Privilege 0 is maximum and is allowed to decrease.  */
1968         return offset;
1969     case 3:
1970         /* Privilege 3 is minimum and is never allowed to increase.  */
1971         dest = get_temp(ctx);
1972         tcg_gen_ori_reg(dest, offset, 3);
1973         break;
1974     default:
1975         dest = get_temp(ctx);
1976         tcg_gen_andi_reg(dest, offset, -4);
1977         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1978         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1979         break;
1980     }
1981     return dest;
1982 }
1983 
1984 #ifdef CONFIG_USER_ONLY
1985 /* On Linux, page zero is normally marked execute only + gateway.
1986    Therefore normal read or write is supposed to fail, but specific
1987    offsets have kernel code mapped to raise permissions to implement
1988    system calls.  Handling this via an explicit check here, rather
1989    in than the "be disp(sr2,r0)" instruction that probably sent us
1990    than in the "be disp(sr2,r0)" instruction that probably sent us
1991    aforementioned BE.  */
1992 static void do_page_zero(DisasContext *ctx)
1993 {
1994     /* If by some means we get here with PSW[N]=1, that implies that
1995        the B,GATE instruction would be skipped, and we'd fault on the
1996        next insn within the privileged page.  */
1997     switch (ctx->null_cond.c) {
1998     case TCG_COND_NEVER:
1999         break;
2000     case TCG_COND_ALWAYS:
2001         tcg_gen_movi_reg(cpu_psw_n, 0);
2002         goto do_sigill;
2003     default:
2004         /* Since this is always the first (and only) insn within the
2005            TB, we should know the state of PSW[N] from TB->FLAGS.  */
2006         g_assert_not_reached();
2007     }
2008 
2009     /* Check that we didn't arrive here via some means that allowed
2010        non-sequential instruction execution.  Normally the PSW[B] bit
2011        detects this by preventing the B,GATE instruction from executing
2012        under such conditions.  */
2013     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2014         goto do_sigill;
2015     }
2016 
2017     switch (ctx->iaoq_f & -4) {
2018     case 0x00: /* Null pointer call */
2019         gen_excp_1(EXCP_IMP);
2020         ctx->base.is_jmp = DISAS_NORETURN;
2021         break;
2022 
2023     case 0xb0: /* LWS (light-weight syscall) */
2024         gen_excp_1(EXCP_SYSCALL_LWS);
2025         ctx->base.is_jmp = DISAS_NORETURN;
2026         break;
2027 
2028     case 0xe0: /* SET_THREAD_POINTER */
2029         tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2030         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2031         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2032         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2033         break;
2034 
2035     case 0x100: /* SYSCALL */
2036         gen_excp_1(EXCP_SYSCALL);
2037         ctx->base.is_jmp = DISAS_NORETURN;
2038         break;
2039 
2040     default:
2041     do_sigill:
2042         gen_excp_1(EXCP_ILL);
2043         ctx->base.is_jmp = DISAS_NORETURN;
2044         break;
2045     }
2046 }
2047 #endif
2048 
2049 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2050 {
2051     cond_free(&ctx->null_cond);
2052     return true;
2053 }
2054 
2055 static bool trans_break(DisasContext *ctx, arg_break *a)
2056 {
2057     return gen_excp_iir(ctx, EXCP_BREAK);
2058 }
2059 
2060 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2061 {
2062     /* No point in nullifying the memory barrier.  */
2063     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2064 
2065     cond_free(&ctx->null_cond);
2066     return true;
2067 }
2068 
2069 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2070 {
2071     unsigned rt = a->t;
2072     TCGv_reg tmp = dest_gpr(ctx, rt);
2073     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2074     save_gpr(ctx, rt, tmp);
2075 
2076     cond_free(&ctx->null_cond);
2077     return true;
2078 }
2079 
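/* Space registers are kept left-justified in 64-bit slots (note that
   trans_mtsp below shifts the new value left by 32), so MFSP extracts
   the architecturally visible value from the high half.  */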
2080 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2081 {
2082     unsigned rt = a->t;
2083     unsigned rs = a->sp;
2084     TCGv_i64 t0 = tcg_temp_new_i64();
2085     TCGv_reg t1 = tcg_temp_new();
2086 
2087     load_spr(ctx, t0, rs);
2088     tcg_gen_shri_i64(t0, t0, 32);
2089     tcg_gen_trunc_i64_reg(t1, t0);
2090 
2091     save_gpr(ctx, rt, t1);
2092     tcg_temp_free(t1);
2093     tcg_temp_free_i64(t0);
2094 
2095     cond_free(&ctx->null_cond);
2096     return true;
2097 }
2098 
2099 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2100 {
2101     unsigned rt = a->t;
2102     unsigned ctl = a->r;
2103     TCGv_reg tmp;
2104 
2105     switch (ctl) {
2106     case CR_SAR:
2107 #ifdef TARGET_HPPA64
2108         if (a->e == 0) {
2109             /* MFSAR without ,W masks low 5 bits.  */
2110             tmp = dest_gpr(ctx, rt);
2111             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2112             save_gpr(ctx, rt, tmp);
2113             goto done;
2114         }
2115 #endif
2116         save_gpr(ctx, rt, cpu_sar);
2117         goto done;
2118     case CR_IT: /* Interval Timer */
2119         /* FIXME: Respect PSW_S bit.  */
2120         nullify_over(ctx);
2121         tmp = dest_gpr(ctx, rt);
2122         if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2123             gen_io_start();
2124             gen_helper_read_interval_timer(tmp);
2125             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2126         } else {
2127             gen_helper_read_interval_timer(tmp);
2128         }
2129         save_gpr(ctx, rt, tmp);
2130         return nullify_end(ctx);
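    /* CR26 and CR27 are readable at any privilege level, hence no
       privilege check on this path; Linux uses CR27 as the TLS
       pointer, cf. SET_THREAD_POINTER in do_page_zero.  */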
2131     case 26:
2132     case 27:
2133         break;
2134     default:
2135         /* All other control registers are privileged.  */
2136         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2137         break;
2138     }
2139 
2140     tmp = get_temp(ctx);
2141     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2142     save_gpr(ctx, rt, tmp);
2143 
2144  done:
2145     cond_free(&ctx->null_cond);
2146     return true;
2147 }
2148 
2149 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2150 {
2151     unsigned rr = a->r;
2152     unsigned rs = a->sp;
2153     TCGv_i64 t64;
2154 
2155     if (rs >= 5) {
2156         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2157     }
2158     nullify_over(ctx);
2159 
2160     t64 = tcg_temp_new_i64();
2161     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2162     tcg_gen_shli_i64(t64, t64, 32);
2163 
2164     if (rs >= 4) {
2165         tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2166         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2167     } else {
2168         tcg_gen_mov_i64(cpu_sr[rs], t64);
2169     }
2170     tcg_temp_free_i64(t64);
2171 
2172     return nullify_end(ctx);
2173 }
2174 
2175 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2176 {
2177     unsigned ctl = a->t;
2178     TCGv_reg reg;
2179     TCGv_reg tmp;
2180 
2181     if (ctl == CR_SAR) {
2182         reg = load_gpr(ctx, a->r);
2183         tmp = tcg_temp_new();
2184         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2185         save_or_nullify(ctx, cpu_sar, tmp);
2186         tcg_temp_free(tmp);
2187 
2188         cond_free(&ctx->null_cond);
2189         return true;
2190     }
2191 
2192     /* All other control registers are privileged or read-only.  */
2193     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2194 
2195 #ifndef CONFIG_USER_ONLY
2196     nullify_over(ctx);
2197     reg = load_gpr(ctx, a->r);
2198 
2199     switch (ctl) {
2200     case CR_IT:
2201         gen_helper_write_interval_timer(cpu_env, reg);
2202         break;
2203     case CR_EIRR:
2204         gen_helper_write_eirr(cpu_env, reg);
2205         break;
2206     case CR_EIEM:
2207         gen_helper_write_eiem(cpu_env, reg);
2208         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2209         break;
2210 
2211     case CR_IIASQ:
2212     case CR_IIAOQ:
2213         /* FIXME: Respect PSW_Q bit */
2214         /* The write advances the queue and stores to the back element.  */
2215         tmp = get_temp(ctx);
2216         tcg_gen_ld_reg(tmp, cpu_env,
2217                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2218         tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2219         tcg_gen_st_reg(reg, cpu_env,
2220                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2221         break;
2222 
2223     case CR_PID1:
2224     case CR_PID2:
2225     case CR_PID3:
2226     case CR_PID4:
2227         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2228 #ifndef CONFIG_USER_ONLY
2229         gen_helper_change_prot_id(cpu_env);
2230 #endif
2231         break;
2232 
2233     default:
2234         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2235         break;
2236     }
2237     return nullify_end(ctx);
2238 #endif
2239 }
2240 
2241 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2242 {
2243     TCGv_reg tmp = tcg_temp_new();
2244 
2245     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2246     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2247     save_or_nullify(ctx, cpu_sar, tmp);
2248     tcg_temp_free(tmp);
2249 
2250     cond_free(&ctx->null_cond);
2251     return true;
2252 }
2253 
2254 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2255 {
2256     TCGv_reg dest = dest_gpr(ctx, a->t);
2257 
2258 #ifdef CONFIG_USER_ONLY
2259     /* We don't implement space registers in user mode. */
2260     tcg_gen_movi_reg(dest, 0);
2261 #else
2262     TCGv_i64 t0 = tcg_temp_new_i64();
2263 
2264     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2265     tcg_gen_shri_i64(t0, t0, 32);
2266     tcg_gen_trunc_i64_reg(dest, t0);
2267 
2268     tcg_temp_free_i64(t0);
2269 #endif
2270     save_gpr(ctx, a->t, dest);
2271 
2272     cond_free(&ctx->null_cond);
2273     return true;
2274 }
2275 
2276 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2277 {
2278     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2279 #ifndef CONFIG_USER_ONLY
2280     TCGv_reg tmp;
2281 
2282     nullify_over(ctx);
2283 
2284     tmp = get_temp(ctx);
2285     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2286     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2287     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2288     save_gpr(ctx, a->t, tmp);
2289 
2290     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2291     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2292     return nullify_end(ctx);
2293 #endif
2294 }
2295 
2296 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2297 {
2298     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2299 #ifndef CONFIG_USER_ONLY
2300     TCGv_reg tmp;
2301 
2302     nullify_over(ctx);
2303 
2304     tmp = get_temp(ctx);
2305     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2306     tcg_gen_ori_reg(tmp, tmp, a->i);
2307     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2308     save_gpr(ctx, a->t, tmp);
2309 
2310     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2311     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2312     return nullify_end(ctx);
2313 #endif
2314 }
2315 
2316 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2317 {
2318     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2319 #ifndef CONFIG_USER_ONLY
2320     TCGv_reg tmp, reg;
2321     nullify_over(ctx);
2322 
2323     reg = load_gpr(ctx, a->r);
2324     tmp = get_temp(ctx);
2325     gen_helper_swap_system_mask(tmp, cpu_env, reg);
2326 
2327     /* Exit the TB to recognize new interrupts.  */
2328     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2329     return nullify_end(ctx);
2330 #endif
2331 }
2332 
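/* RFI: return from interruption.  The helper reloads the PSW and both
   instruction-address queues from the interruption control registers;
   the ,R form additionally restores the shadowed general registers.  */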
2333 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2334 {
2335     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2336 #ifndef CONFIG_USER_ONLY
2337     nullify_over(ctx);
2338 
2339     if (rfi_r) {
2340         gen_helper_rfi_r(cpu_env);
2341     } else {
2342         gen_helper_rfi(cpu_env);
2343     }
2344     /* Exit the TB to recognize new interrupts.  */
2345     tcg_gen_exit_tb(NULL, 0);
2346     ctx->base.is_jmp = DISAS_NORETURN;
2347 
2348     return nullify_end(ctx);
2349 #endif
2350 }
2351 
2352 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2353 {
2354     return do_rfi(ctx, false);
2355 }
2356 
2357 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2358 {
2359     return do_rfi(ctx, true);
2360 }
2361 
2362 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2363 {
2364     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2365 #ifndef CONFIG_USER_ONLY
2366     nullify_over(ctx);
2367     gen_helper_halt(cpu_env);
2368     ctx->base.is_jmp = DISAS_NORETURN;
2369     return nullify_end(ctx);
2370 #endif
2371 }
2372 
2373 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2374 {
2375     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2376 #ifndef CONFIG_USER_ONLY
2377     nullify_over(ctx);
2378     gen_helper_reset(cpu_env);
2379     ctx->base.is_jmp = DISAS_NORETURN;
2380     return nullify_end(ctx);
2381 #endif
2382 }
2383 
2384 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2385 {
2386     if (a->m) {
2387         TCGv_reg dest = dest_gpr(ctx, a->b);
2388         TCGv_reg src1 = load_gpr(ctx, a->b);
2389         TCGv_reg src2 = load_gpr(ctx, a->x);
2390 
2391         /* The only thing we need to do is the base register modification.  */
2392         tcg_gen_add_reg(dest, src1, src2);
2393         save_gpr(ctx, a->b, dest);
2394     }
2395     cond_free(&ctx->null_cond);
2396     return true;
2397 }
2398 
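/* PROBE: test read or write access to an address at a given privilege
   level without faulting; the helper leaves 1 in the target register
   if the access would be permitted and 0 if not.  */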
2399 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2400 {
2401     TCGv_reg dest, ofs;
2402     TCGv_i32 level, want;
2403     TCGv_tl addr;
2404 
2405     nullify_over(ctx);
2406 
2407     dest = dest_gpr(ctx, a->t);
2408     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2409 
2410     if (a->imm) {
2411         level = tcg_constant_i32(a->ri);
2412     } else {
2413         level = tcg_temp_new_i32();
2414         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2415         tcg_gen_andi_i32(level, level, 3);
2416     }
2417     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2418 
2419     gen_helper_probe(dest, cpu_env, addr, level, want);
2420 
2421     tcg_temp_free_i32(level);
2422 
2423     save_gpr(ctx, a->t, dest);
2424     return nullify_end(ctx);
2425 }
2426 
2427 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2428 {
2429     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2430 #ifndef CONFIG_USER_ONLY
2431     TCGv_tl addr;
2432     TCGv_reg ofs, reg;
2433 
2434     nullify_over(ctx);
2435 
2436     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2437     reg = load_gpr(ctx, a->r);
2438     if (a->addr) {
2439         gen_helper_itlba(cpu_env, addr, reg);
2440     } else {
2441         gen_helper_itlbp(cpu_env, addr, reg);
2442     }
2443 
2444     /* Exit TB for TLB change if mmu is enabled.  */
2445     if (ctx->tb_flags & PSW_C) {
2446         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2447     }
2448     return nullify_end(ctx);
2449 #endif
2450 }
2451 
2452 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2453 {
2454     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2455 #ifndef CONFIG_USER_ONLY
2456     TCGv_tl addr;
2457     TCGv_reg ofs;
2458 
2459     nullify_over(ctx);
2460 
2461     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2462     if (a->m) {
2463         save_gpr(ctx, a->b, ofs);
2464     }
2465     if (a->local) {
2466         gen_helper_ptlbe(cpu_env);
2467     } else {
2468         gen_helper_ptlb(cpu_env, addr);
2469     }
2470 
2471     /* Exit TB for TLB change if mmu is enabled.  */
2472     if (ctx->tb_flags & PSW_C) {
2473         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2474     }
2475     return nullify_end(ctx);
2476 #endif
2477 }
2478 
2479 /*
2480  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2481  * See
2482  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2483  *     page 13-9 (195/206)
2484  */
2485 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2486 {
2487     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2488 #ifndef CONFIG_USER_ONLY
2489     TCGv_tl addr, atl, stl;
2490     TCGv_reg reg;
2491 
2492     nullify_over(ctx);
2493 
2494     /*
2495      * FIXME:
2496      *  if (not (pcxl or pcxl2))
2497      *    return gen_illegal(ctx);
2498      *
2499      * Note for future: these are 32-bit systems; no hppa64.
2500      */
2501 
2502     atl = tcg_temp_new_tl();
2503     stl = tcg_temp_new_tl();
2504     addr = tcg_temp_new_tl();
2505 
2506     tcg_gen_ld32u_i64(stl, cpu_env,
2507                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2508                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2509     tcg_gen_ld32u_i64(atl, cpu_env,
2510                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2511                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2512     tcg_gen_shli_i64(stl, stl, 32);
2513     tcg_gen_or_tl(addr, atl, stl);
2514     tcg_temp_free_tl(atl);
2515     tcg_temp_free_tl(stl);
2516 
2517     reg = load_gpr(ctx, a->r);
2518     if (a->addr) {
2519         gen_helper_itlba(cpu_env, addr, reg);
2520     } else {
2521         gen_helper_itlbp(cpu_env, addr, reg);
2522     }
2523     tcg_temp_free_tl(addr);
2524 
2525     /* Exit TB for TLB change if mmu is enabled.  */
2526     if (ctx->tb_flags & PSW_C) {
2527         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2528     }
2529     return nullify_end(ctx);
2530 #endif
2531 }
2532 
2533 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2534 {
2535     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2536 #ifndef CONFIG_USER_ONLY
2537     TCGv_tl vaddr;
2538     TCGv_reg ofs, paddr;
2539 
2540     nullify_over(ctx);
2541 
2542     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2543 
2544     paddr = tcg_temp_new();
2545     gen_helper_lpa(paddr, cpu_env, vaddr);
2546 
2547     /* Note that physical address result overrides base modification.  */
2548     if (a->m) {
2549         save_gpr(ctx, a->b, ofs);
2550     }
2551     save_gpr(ctx, a->t, paddr);
2552     tcg_temp_free(paddr);
2553 
2554     return nullify_end(ctx);
2555 #endif
2556 }
2557 
2558 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2559 {
2560     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2561 
2562     /* The Coherence Index is an implementation-defined function of the
2563        physical address.  Two addresses with the same CI have a coherent
2564        view of the cache.  Our implementation returns 0 for all addresses,
2565        since the entire address space is coherent.  */
2566     save_gpr(ctx, a->t, tcg_constant_reg(0));
2567 
2568     cond_free(&ctx->null_cond);
2569     return true;
2570 }
2571 
2572 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2573 {
2574     return do_add_reg(ctx, a, false, false, false, false);
2575 }
2576 
2577 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2578 {
2579     return do_add_reg(ctx, a, true, false, false, false);
2580 }
2581 
2582 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2583 {
2584     return do_add_reg(ctx, a, false, true, false, false);
2585 }
2586 
2587 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2588 {
2589     return do_add_reg(ctx, a, false, false, false, true);
2590 }
2591 
2592 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2593 {
2594     return do_add_reg(ctx, a, false, true, false, true);
2595 }
2596 
2597 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2598 {
2599     return do_sub_reg(ctx, a, false, false, false);
2600 }
2601 
2602 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2603 {
2604     return do_sub_reg(ctx, a, true, false, false);
2605 }
2606 
2607 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2608 {
2609     return do_sub_reg(ctx, a, false, false, true);
2610 }
2611 
2612 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2613 {
2614     return do_sub_reg(ctx, a, true, false, true);
2615 }
2616 
2617 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2618 {
2619     return do_sub_reg(ctx, a, false, true, false);
2620 }
2621 
2622 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2623 {
2624     return do_sub_reg(ctx, a, true, true, false);
2625 }
2626 
2627 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2628 {
2629     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2630 }
2631 
2632 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2633 {
2634     return do_log_reg(ctx, a, tcg_gen_and_reg);
2635 }
2636 
2637 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2638 {
2639     if (a->cf == 0) {
2640         unsigned r2 = a->r2;
2641         unsigned r1 = a->r1;
2642         unsigned rt = a->t;
2643 
2644         if (rt == 0) { /* NOP */
2645             cond_free(&ctx->null_cond);
2646             return true;
2647         }
2648         if (r2 == 0) { /* COPY */
2649             if (r1 == 0) {
2650                 TCGv_reg dest = dest_gpr(ctx, rt);
2651                 tcg_gen_movi_reg(dest, 0);
2652                 save_gpr(ctx, rt, dest);
2653             } else {
2654                 save_gpr(ctx, rt, cpu_gr[r1]);
2655             }
2656             cond_free(&ctx->null_cond);
2657             return true;
2658         }
2659 #ifndef CONFIG_USER_ONLY
2660         /* These are QEMU extensions and are nops in the real architecture:
2661          *
2662          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2663          * or %r31,%r31,%r31 -- death loop; offline cpu
2664          *                      currently implemented as idle.
2665          */
2666         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2667             /* No need to check for supervisor, as userland can only pause
2668                until the next timer interrupt.  */
2669             nullify_over(ctx);
2670 
2671             /* Advance the instruction queue.  */
2672             copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2673             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2674             nullify_set(ctx, 0);
2675 
2676             /* Tell the qemu main loop to halt until this cpu has work.  */
2677             tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2678                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2679             gen_excp_1(EXCP_HALTED);
2680             ctx->base.is_jmp = DISAS_NORETURN;
2681 
2682             return nullify_end(ctx);
2683         }
2684 #endif
2685     }
2686     return do_log_reg(ctx, a, tcg_gen_or_reg);
2687 }
2688 
2689 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2690 {
2691     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2692 }
2693 
2694 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2695 {
2696     TCGv_reg tcg_r1, tcg_r2;
2697 
2698     if (a->cf) {
2699         nullify_over(ctx);
2700     }
2701     tcg_r1 = load_gpr(ctx, a->r1);
2702     tcg_r2 = load_gpr(ctx, a->r2);
2703     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2704     return nullify_end(ctx);
2705 }
2706 
2707 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2708 {
2709     TCGv_reg tcg_r1, tcg_r2;
2710 
2711     if (a->cf) {
2712         nullify_over(ctx);
2713     }
2714     tcg_r1 = load_gpr(ctx, a->r1);
2715     tcg_r2 = load_gpr(ctx, a->r2);
2716     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2717     return nullify_end(ctx);
2718 }
2719 
2720 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2721 {
2722     TCGv_reg tcg_r1, tcg_r2, tmp;
2723 
2724     if (a->cf) {
2725         nullify_over(ctx);
2726     }
2727     tcg_r1 = load_gpr(ctx, a->r1);
2728     tcg_r2 = load_gpr(ctx, a->r2);
2729     tmp = get_temp(ctx);
2730     tcg_gen_not_reg(tmp, tcg_r2);
2731     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2732     return nullify_end(ctx);
2733 }
2734 
2735 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2736 {
2737     return do_uaddcm(ctx, a, false);
2738 }
2739 
2740 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2741 {
2742     return do_uaddcm(ctx, a, true);
2743 }
2744 
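/* DCOR: decimal correct.  Roughly: one carry/borrow indicator per
   4-bit BCD digit is extracted from cpu_psw_cb (the shift by 3 aligns
   it, the mask 0x11111111 isolates it), and the multiply by 6 builds
   the classic per-digit +/-6 adjustment applied via do_unit.  */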
2745 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2746 {
2747     TCGv_reg tmp;
2748 
2749     nullify_over(ctx);
2750 
2751     tmp = get_temp(ctx);
2752     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2753     if (!is_i) {
2754         tcg_gen_not_reg(tmp, tmp);
2755     }
2756     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2757     tcg_gen_muli_reg(tmp, tmp, 6);
2758     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2759             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2760     return nullify_end(ctx);
2761 }
2762 
2763 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2764 {
2765     return do_dcor(ctx, a, false);
2766 }
2767 
2768 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2769 {
2770     return do_dcor(ctx, a, true);
2771 }
2772 
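/* DS: divide step, the primitive from which millicode builds 32-bit
   division.  Each step shifts the partial remainder left one bit
   through PSW[CB] and adds or subtracts the divisor per PSW[V].  */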
2773 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2774 {
2775     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2776 
2777     nullify_over(ctx);
2778 
2779     in1 = load_gpr(ctx, a->r1);
2780     in2 = load_gpr(ctx, a->r2);
2781 
2782     add1 = tcg_temp_new();
2783     add2 = tcg_temp_new();
2784     addc = tcg_temp_new();
2785     dest = tcg_temp_new();
2786     zero = tcg_constant_reg(0);
2787 
2788     /* Form R1 << 1 | PSW[CB]{8}.  */
2789     tcg_gen_add_reg(add1, in1, in1);
2790     tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2791 
2792     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2793        carry{8} requires that we subtract via + ~R2 + 1, as described in
2794        the manual.  By extracting and masking V, we can produce the
2795        proper inputs to the addition without movcond.  */
2796     tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2797     tcg_gen_xor_reg(add2, in2, addc);
2798     tcg_gen_andi_reg(addc, addc, 1);
2799     /* ??? This is only correct for 32-bit.  */
2800     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2801     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2802 
2803     tcg_temp_free(addc);
2804 
2805     /* Write back the result register.  */
2806     save_gpr(ctx, a->t, dest);
2807 
2808     /* Write back PSW[CB].  */
2809     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2810     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2811 
2812     /* Write back PSW[V] for the division step.  */
2813     tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2814     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2815 
2816     /* Install the new nullification.  */
2817     if (a->cf) {
2818         TCGv_reg sv = NULL;
2819         if (cond_need_sv(a->cf >> 1)) {
2820             /* ??? The lshift is supposed to contribute to overflow.  */
2821             sv = do_add_sv(ctx, dest, add1, add2);
2822         }
2823         ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2824     }
2825 
2826     tcg_temp_free(add1);
2827     tcg_temp_free(add2);
2828     tcg_temp_free(dest);
2829 
2830     return nullify_end(ctx);
2831 }
2832 
2833 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2834 {
2835     return do_add_imm(ctx, a, false, false);
2836 }
2837 
2838 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2839 {
2840     return do_add_imm(ctx, a, true, false);
2841 }
2842 
2843 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2844 {
2845     return do_add_imm(ctx, a, false, true);
2846 }
2847 
2848 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2849 {
2850     return do_add_imm(ctx, a, true, true);
2851 }
2852 
2853 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2854 {
2855     return do_sub_imm(ctx, a, false);
2856 }
2857 
2858 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2859 {
2860     return do_sub_imm(ctx, a, true);
2861 }
2862 
2863 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2864 {
2865     TCGv_reg tcg_im, tcg_r2;
2866 
2867     if (a->cf) {
2868         nullify_over(ctx);
2869     }
2870 
2871     tcg_im = load_const(ctx, a->i);
2872     tcg_r2 = load_gpr(ctx, a->r);
2873     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2874 
2875     return nullify_end(ctx);
2876 }
2877 
2878 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2879 {
2880     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2881                    a->disp, a->sp, a->m, a->size | MO_TE);
2882 }
2883 
2884 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2885 {
2886     assert(a->x == 0 && a->scale == 0);
2887     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2888 }
2889 
2890 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2891 {
2892     MemOp mop = MO_TE | MO_ALIGN | a->size;
2893     TCGv_reg zero, dest, ofs;
2894     TCGv_tl addr;
2895 
2896     nullify_over(ctx);
2897 
2898     if (a->m) {
2899         /* Base register modification.  Make sure that if RT == RB,
2900            we still see the result of the load.  */
2901         dest = get_temp(ctx);
2902     } else {
2903         dest = dest_gpr(ctx, a->t);
2904     }
2905 
2906     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2907              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2908 
2909     /*
2910      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2911      * However, actual hardware succeeds when aligned mod 4.
2912      * Detect this case and log a GUEST_ERROR.
2913      *
2914      * TODO: HPPA64 relaxes the over-alignment requirement
2915      * with the ,co completer.
2916      */
2917     gen_helper_ldc_check(addr);
2918 
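    /* LDCW is load-and-clear: atomically fetch the old word and leave
       zero behind -- the PA-RISC semaphore primitive -- hence the
       atomic exchange against a constant zero below.  */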
2919     zero = tcg_constant_reg(0);
2920     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2921 
2922     if (a->m) {
2923         save_gpr(ctx, a->b, ofs);
2924     }
2925     save_gpr(ctx, a->t, dest);
2926 
2927     return nullify_end(ctx);
2928 }
2929 
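/* STBY: store bytes.  The ,B (begin) and ,E (end) forms store the
   partial words at the ragged edges of an unaligned block copy; the
   _parallel helper variants are used under CF_PARALLEL so that the
   partial-word update is not a racy read-modify-write.  */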
2930 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2931 {
2932     TCGv_reg ofs, val;
2933     TCGv_tl addr;
2934 
2935     nullify_over(ctx);
2936 
2937     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2938              ctx->mmu_idx == MMU_PHYS_IDX);
2939     val = load_gpr(ctx, a->r);
2940     if (a->a) {
2941         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2942             gen_helper_stby_e_parallel(cpu_env, addr, val);
2943         } else {
2944             gen_helper_stby_e(cpu_env, addr, val);
2945         }
2946     } else {
2947         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2948             gen_helper_stby_b_parallel(cpu_env, addr, val);
2949         } else {
2950             gen_helper_stby_b(cpu_env, addr, val);
2951         }
2952     }
2953     if (a->m) {
2954         tcg_gen_andi_reg(ofs, ofs, ~3);
2955         save_gpr(ctx, a->b, ofs);
2956     }
2957 
2958     return nullify_end(ctx);
2959 }
2960 
2961 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2962 {
2963     int hold_mmu_idx = ctx->mmu_idx;
2964 
2965     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2966     ctx->mmu_idx = MMU_PHYS_IDX;
2967     trans_ld(ctx, a);
2968     ctx->mmu_idx = hold_mmu_idx;
2969     return true;
2970 }
2971 
2972 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2973 {
2974     int hold_mmu_idx = ctx->mmu_idx;
2975 
2976     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2977     ctx->mmu_idx = MMU_PHYS_IDX;
2978     trans_st(ctx, a);
2979     ctx->mmu_idx = hold_mmu_idx;
2980     return true;
2981 }
2982 
2983 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2984 {
2985     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2986 
2987     tcg_gen_movi_reg(tcg_rt, a->i);
2988     save_gpr(ctx, a->t, tcg_rt);
2989     cond_free(&ctx->null_cond);
2990     return true;
2991 }
2992 
2993 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2994 {
2995     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2996     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2997 
2998     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2999     save_gpr(ctx, 1, tcg_r1);
3000     cond_free(&ctx->null_cond);
3001     return true;
3002 }
3003 
3004 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3005 {
3006     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3007 
3008     /* Special case rb == 0, for the LDI pseudo-op.
3009        The COPY pseudo-op is handled for free within tcg_gen_addi_reg.  */
3010     if (a->b == 0) {
3011         tcg_gen_movi_reg(tcg_rt, a->i);
3012     } else {
3013         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3014     }
3015     save_gpr(ctx, a->t, tcg_rt);
3016     cond_free(&ctx->null_cond);
3017     return true;
3018 }
3019 
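/* Compare-and-branch family.  The comparison is a subtraction whose
   result (plus overflow, where the condition needs it) feeds
   do_sub_cond; c * 2 + f folds the 'f' bit into the condition number,
   since f simply negates the sense of condition c.  */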
3020 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3021                     unsigned c, unsigned f, unsigned n, int disp)
3022 {
3023     TCGv_reg dest, in2, sv;
3024     DisasCond cond;
3025 
3026     in2 = load_gpr(ctx, r);
3027     dest = get_temp(ctx);
3028 
3029     tcg_gen_sub_reg(dest, in1, in2);
3030 
3031     sv = NULL;
3032     if (cond_need_sv(c)) {
3033         sv = do_sub_sv(ctx, dest, in1, in2);
3034     }
3035 
3036     cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3037     return do_cbranch(ctx, disp, n, &cond);
3038 }
3039 
3040 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3041 {
3042     nullify_over(ctx);
3043     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3044 }
3045 
3046 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3047 {
3048     nullify_over(ctx);
3049     return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3050 }
3051 
3052 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3053                     unsigned c, unsigned f, unsigned n, int disp)
3054 {
3055     TCGv_reg dest, in2, sv, cb_msb;
3056     DisasCond cond;
3057 
3058     in2 = load_gpr(ctx, r);
3059     dest = tcg_temp_new();
3060     sv = NULL;
3061     cb_msb = NULL;
3062 
3063     if (cond_need_cb(c)) {
3064         cb_msb = get_temp(ctx);
3065         tcg_gen_movi_reg(cb_msb, 0);
3066         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3067     } else {
3068         tcg_gen_add_reg(dest, in1, in2);
3069     }
3070     if (cond_need_sv(c)) {
3071         sv = do_add_sv(ctx, dest, in1, in2);
3072     }
3073 
3074     cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3075     save_gpr(ctx, r, dest);
3076     tcg_temp_free(dest);
3077     return do_cbranch(ctx, disp, n, &cond);
3078 }
3079 
3080 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3081 {
3082     nullify_over(ctx);
3083     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3084 }
3085 
3086 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3087 {
3088     nullify_over(ctx);
3089     return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3090 }
3091 
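/* BB: branch on bit.  Shift the selected bit into the sign position
   and branch on the sign, so "< 0" means the bit was set.  SAR and
   the 'p' field use big-endian bit numbering (bit 0 is the MSB), so
   a left shift by that amount brings bit p to the sign position.  */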
3092 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3093 {
3094     TCGv_reg tmp, tcg_r;
3095     DisasCond cond;
3096 
3097     nullify_over(ctx);
3098 
3099     tmp = tcg_temp_new();
3100     tcg_r = load_gpr(ctx, a->r);
3101     tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3102 
3103     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3104     tcg_temp_free(tmp);
3105     return do_cbranch(ctx, a->disp, a->n, &cond);
3106 }
3107 
3108 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3109 {
3110     TCGv_reg tmp, tcg_r;
3111     DisasCond cond;
3112 
3113     nullify_over(ctx);
3114 
3115     tmp = tcg_temp_new();
3116     tcg_r = load_gpr(ctx, a->r);
3117     tcg_gen_shli_reg(tmp, tcg_r, a->p);
3118 
3119     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3120     tcg_temp_free(tmp);
3121     return do_cbranch(ctx, a->disp, a->n, &cond);
3122 }
3123 
3124 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3125 {
3126     TCGv_reg dest;
3127     DisasCond cond;
3128 
3129     nullify_over(ctx);
3130 
3131     dest = dest_gpr(ctx, a->r2);
3132     if (a->r1 == 0) {
3133         tcg_gen_movi_reg(dest, 0);
3134     } else {
3135         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3136     }
3137 
3138     cond = do_sed_cond(a->c, dest);
3139     return do_cbranch(ctx, a->disp, a->n, &cond);
3140 }
3141 
3142 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3143 {
3144     TCGv_reg dest;
3145     DisasCond cond;
3146 
3147     nullify_over(ctx);
3148 
3149     dest = dest_gpr(ctx, a->r);
3150     tcg_gen_movi_reg(dest, a->i);
3151 
3152     cond = do_sed_cond(a->c, dest);
3153     return do_cbranch(ctx, a->disp, a->n, &cond);
3154 }
3155 
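/* SHRPW: shift the 64-bit pair r1:r2 right and keep the low word.
   Special cases below: r1 == 0 degenerates to a plain right shift of
   r2, and r1 == r2 is a 32-bit rotate right.  */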
3156 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3157 {
3158     TCGv_reg dest;
3159 
3160     if (a->c) {
3161         nullify_over(ctx);
3162     }
3163 
3164     dest = dest_gpr(ctx, a->t);
3165     if (a->r1 == 0) {
3166         tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3167         tcg_gen_shr_reg(dest, dest, cpu_sar);
3168     } else if (a->r1 == a->r2) {
3169         TCGv_i32 t32 = tcg_temp_new_i32();
3170         tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3171         tcg_gen_rotr_i32(t32, t32, cpu_sar);
3172         tcg_gen_extu_i32_reg(dest, t32);
3173         tcg_temp_free_i32(t32);
3174     } else {
3175         TCGv_i64 t = tcg_temp_new_i64();
3176         TCGv_i64 s = tcg_temp_new_i64();
3177 
3178         tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3179         tcg_gen_extu_reg_i64(s, cpu_sar);
3180         tcg_gen_shr_i64(t, t, s);
3181         tcg_gen_trunc_i64_reg(dest, t);
3182 
3183         tcg_temp_free_i64(t);
3184         tcg_temp_free_i64(s);
3185     }
3186     save_gpr(ctx, a->t, dest);
3187 
3188     /* Install the new nullification.  */
3189     cond_free(&ctx->null_cond);
3190     if (a->c) {
3191         ctx->null_cond = do_sed_cond(a->c, dest);
3192     }
3193     return nullify_end(ctx);
3194 }
3195 
3196 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3197 {
3198     unsigned sa = 31 - a->cpos;
3199     TCGv_reg dest, t2;
3200 
3201     if (a->c) {
3202         nullify_over(ctx);
3203     }
3204 
3205     dest = dest_gpr(ctx, a->t);
3206     t2 = load_gpr(ctx, a->r2);
3207     if (a->r1 == a->r2) {
3208         TCGv_i32 t32 = tcg_temp_new_i32();
3209         tcg_gen_trunc_reg_i32(t32, t2);
3210         tcg_gen_rotri_i32(t32, t32, sa);
3211         tcg_gen_extu_i32_reg(dest, t32);
3212         tcg_temp_free_i32(t32);
3213     } else if (a->r1 == 0) {
3214         tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3215     } else {
3216         TCGv_reg t0 = tcg_temp_new();
3217         tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3218         tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
3219         tcg_temp_free(t0);
3220     }
3221     save_gpr(ctx, a->t, dest);
3222 
3223     /* Install the new nullification.  */
3224     cond_free(&ctx->null_cond);
3225     if (a->c) {
3226         ctx->null_cond = do_sed_cond(a->c, dest);
3227     }
3228     return nullify_end(ctx);
3229 }
3230 
3231 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3232 {
3233     unsigned len = 32 - a->clen;
3234     TCGv_reg dest, src, tmp;
3235 
3236     if (a->c) {
3237         nullify_over(ctx);
3238     }
3239 
3240     dest = dest_gpr(ctx, a->t);
3241     src = load_gpr(ctx, a->r);
3242     tmp = tcg_temp_new();
3243 
3244     /* Recall that SAR is using big-endian bit numbering.  */
3245     tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3246     if (a->se) {
3247         tcg_gen_sar_reg(dest, src, tmp);
3248         tcg_gen_sextract_reg(dest, dest, 0, len);
3249     } else {
3250         tcg_gen_shr_reg(dest, src, tmp);
3251         tcg_gen_extract_reg(dest, dest, 0, len);
3252     }
3253     tcg_temp_free(tmp);
3254     save_gpr(ctx, a->t, dest);
3255 
3256     /* Install the new nullification.  */
3257     cond_free(&ctx->null_cond);
3258     if (a->c) {
3259         ctx->null_cond = do_sed_cond(a->c, dest);
3260     }
3261     return nullify_end(ctx);
3262 }
3263 
3264 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3265 {
3266     unsigned len = 32 - a->clen;
3267     unsigned cpos = 31 - a->pos;
3268     TCGv_reg dest, src;
3269 
3270     if (a->c) {
3271         nullify_over(ctx);
3272     }
3273 
3274     dest = dest_gpr(ctx, a->t);
3275     src = load_gpr(ctx, a->r);
3276     if (a->se) {
3277         tcg_gen_sextract_reg(dest, src, cpos, len);
3278     } else {
3279         tcg_gen_extract_reg(dest, src, cpos, len);
3280     }
3281     save_gpr(ctx, a->t, dest);
3282 
3283     /* Install the new nullification.  */
3284     cond_free(&ctx->null_cond);
3285     if (a->c) {
3286         ctx->null_cond = do_sed_cond(a->c, dest);
3287     }
3288     return nullify_end(ctx);
3289 }
3290 
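/* DEPWI: deposit an immediate into a field of rt.  Precomputing
   mask0 = deposit(0, i) and mask1 = deposit(-1, i) lets the ,Z form
   write mask0 directly, while the merge form computes
   (rt & mask1) | mask0, dropping either step when its mask is a
   no-op.  */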
3291 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3292 {
3293     unsigned len = 32 - a->clen;
3294     target_sreg mask0, mask1;
3295     TCGv_reg dest;
3296 
3297     if (a->c) {
3298         nullify_over(ctx);
3299     }
3300     if (a->cpos + len > 32) {
3301         len = 32 - a->cpos;
3302     }
3303 
3304     dest = dest_gpr(ctx, a->t);
3305     mask0 = deposit64(0, a->cpos, len, a->i);
3306     mask1 = deposit64(-1, a->cpos, len, a->i);
3307 
3308     if (a->nz) {
3309         TCGv_reg src = load_gpr(ctx, a->t);
3310         if (mask1 != -1) {
3311             tcg_gen_andi_reg(dest, src, mask1);
3312             src = dest;
3313         }
3314         tcg_gen_ori_reg(dest, src, mask0);
3315     } else {
3316         tcg_gen_movi_reg(dest, mask0);
3317     }
3318     save_gpr(ctx, a->t, dest);
3319 
3320     /* Install the new nullification.  */
3321     cond_free(&ctx->null_cond);
3322     if (a->c) {
3323         ctx->null_cond = do_sed_cond(a->c, dest);
3324     }
3325     return nullify_end(ctx);
3326 }
3327 
3328 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3329 {
3330     unsigned rs = a->nz ? a->t : 0;
3331     unsigned len = 32 - a->clen;
3332     TCGv_reg dest, val;
3333 
3334     if (a->c) {
3335         nullify_over(ctx);
3336     }
3337     if (a->cpos + len > 32) {
3338         len = 32 - a->cpos;
3339     }
3340 
3341     dest = dest_gpr(ctx, a->t);
3342     val = load_gpr(ctx, a->r);
3343     if (rs == 0) {
3344         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3345     } else {
3346         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3347     }
3348     save_gpr(ctx, a->t, dest);
3349 
3350     /* Install the new nullification.  */
3351     cond_free(&ctx->null_cond);
3352     if (a->c) {
3353         ctx->null_cond = do_sed_cond(a->c, dest);
3354     }
3355     return nullify_end(ctx);
3356 }
3357 
3358 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3359                         unsigned nz, unsigned clen, TCGv_reg val)
3360 {
3361     unsigned rs = nz ? rt : 0;
3362     unsigned len = 32 - clen;
3363     TCGv_reg mask, tmp, shift, dest;
3364     unsigned msb = 1U << (len - 1);
3365 
3366     dest = dest_gpr(ctx, rt);
3367     shift = tcg_temp_new();
3368     tmp = tcg_temp_new();
3369 
3370     /* Convert big-endian bit numbering in SAR to left-shift.  */
3371     tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3372 
3373     mask = tcg_const_reg(msb + (msb - 1));
3374     tcg_gen_and_reg(tmp, val, mask);
3375     if (rs) {
3376         tcg_gen_shl_reg(mask, mask, shift);
3377         tcg_gen_shl_reg(tmp, tmp, shift);
3378         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3379         tcg_gen_or_reg(dest, dest, tmp);
3380     } else {
3381         tcg_gen_shl_reg(dest, tmp, shift);
3382     }
3383     tcg_temp_free(shift);
3384     tcg_temp_free(mask);
3385     tcg_temp_free(tmp);
3386     save_gpr(ctx, rt, dest);
3387 
3388     /* Install the new nullification.  */
3389     cond_free(&ctx->null_cond);
3390     if (c) {
3391         ctx->null_cond = do_sed_cond(c, dest);
3392     }
3393     return nullify_end(ctx);
3394 }
3395 
3396 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3397 {
3398     if (a->c) {
3399         nullify_over(ctx);
3400     }
3401     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3402 }
3403 
3404 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3405 {
3406     if (a->c) {
3407         nullify_over(ctx);
3408     }
3409     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3410 }
3411 
3412 static bool trans_be(DisasContext *ctx, arg_be *a)
3413 {
3414     TCGv_reg tmp;
3415 
3416 #ifdef CONFIG_USER_ONLY
3417     /* ??? It seems like there should be a good way of using
3418        "be disp(sr2, r0)", the canonical gateway entry mechanism
3419        to our advantage.  But that appears to be inconvenient to
3420        manage alongside branch delay slots.  Therefore we handle
3421        entry into the gateway page via absolute address.  */
3422     /* Since we don't implement spaces, just branch.  Do notice the special
3423        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3424        goto_tb to the TB containing the syscall.  */
3425     if (a->b == 0) {
3426         return do_dbranch(ctx, a->disp, a->l, a->n);
3427     }
3428 #else
3429     nullify_over(ctx);
3430 #endif
3431 
3432     tmp = get_temp(ctx);
3433     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3434     tmp = do_ibranch_priv(ctx, tmp);
3435 
3436 #ifdef CONFIG_USER_ONLY
3437     return do_ibranch(ctx, tmp, a->l, a->n);
3438 #else
3439     TCGv_i64 new_spc = tcg_temp_new_i64();
3440 
3441     load_spr(ctx, new_spc, a->sp);
3442     if (a->l) {
3443         copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3444         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3445     }
3446     if (a->n && use_nullify_skip(ctx)) {
3447         tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3448         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3449         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3450         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3451     } else {
3452         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3453         if (ctx->iaoq_b == -1) {
3454             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3455         }
3456         tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3457         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3458         nullify_set(ctx, a->n);
3459     }
3460     tcg_temp_free_i64(new_spc);
3461     tcg_gen_lookup_and_goto_ptr();
3462     ctx->base.is_jmp = DISAS_NORETURN;
3463     return nullify_end(ctx);
3464 #endif
3465 }
3466 
3467 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3468 {
3469     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3470 }
3471 
3472 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3473 {
3474     target_ureg dest = iaoq_dest(ctx, a->disp);
3475 
3476     nullify_over(ctx);
3477 
3478     /* Make sure the caller hasn't done something weird with the queue.
3479      * ??? This is not quite the same as the PSW[B] bit, which would be
3480      * expensive to track.  Real hardware will trap for
3481      *    b  gateway
3482      *    b  gateway+4  (in delay slot of first branch)
3483      * However, checking for a non-sequential instruction queue *will*
3484      * diagnose the security hole
3485      *    b  gateway
3486      *    b  evil
3487      * in which instructions at evil would run with increased privs.
3488      */
3489     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3490         return gen_illegal(ctx);
3491     }
3492 
3493 #ifndef CONFIG_USER_ONLY
3494     if (ctx->tb_flags & PSW_C) {
3495         CPUHPPAState *env = ctx->cs->env_ptr;
3496         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3497         /* If we could not find a TLB entry, then we need to generate an
3498            ITLB miss exception so the kernel will provide it.
3499            The resulting TLB fill operation will invalidate this TB and
3500            we will re-translate, at which point we *will* be able to find
3501            the TLB entry and determine if this is in fact a gateway page.  */
3502         if (type < 0) {
3503             gen_excp(ctx, EXCP_ITLB_MISS);
3504             return true;
3505         }
3506         /* No change for non-gateway pages or for priv decrease.  */
3507         if (type >= 4 && type - 4 < ctx->privilege) {
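            /* Promote: place the gateway's privilege level in the
               low two bits of the branch target.  */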
3508             dest = deposit32(dest, 0, 2, type - 4);
3509         }
3510     } else {
3511         dest &= -4;  /* priv = 0 */
3512     }
3513 #endif
3514 
3515     if (a->l) {
3516         TCGv_reg tmp = dest_gpr(ctx, a->l);
3517         if (ctx->privilege < 3) {
3518             tcg_gen_andi_reg(tmp, tmp, -4);
3519         }
3520         tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3521         save_gpr(ctx, a->l, tmp);
3522     }
3523 
3524     return do_dbranch(ctx, dest, 0, a->n);
3525 }
3526 
3527 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3528 {
3529     if (a->x) {
3530         TCGv_reg tmp = get_temp(ctx);
3531         tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3532         tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3533         /* The computation here never changes privilege level.  */
3534         return do_ibranch(ctx, tmp, a->l, a->n);
3535     } else {
3536         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3537         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3538     }
3539 }
3540 
3541 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3542 {
3543     TCGv_reg dest;
3544 
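    /* The BV target is GR[b] plus GR[x] scaled by 8.  */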
3545     if (a->x == 0) {
3546         dest = load_gpr(ctx, a->b);
3547     } else {
3548         dest = get_temp(ctx);
3549         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3550         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3551     }
3552     dest = do_ibranch_priv(ctx, dest);
3553     return do_ibranch(ctx, dest, 0, a->n);
3554 }
3555 
3556 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3557 {
3558     TCGv_reg dest;
3559 
3560 #ifdef CONFIG_USER_ONLY
3561     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3562     return do_ibranch(ctx, dest, a->l, a->n);
3563 #else
3564     nullify_over(ctx);
3565     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3566 
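    /* Advance the queue: B moves to F, and the computed target with
       its space becomes the new B.  */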
3567     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3568     if (ctx->iaoq_b == -1) {
3569         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3570     }
3571     copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3572     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3573     if (a->l) {
3574         copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3575     }
3576     nullify_set(ctx, a->n);
3577     tcg_gen_lookup_and_goto_ptr();
3578     ctx->base.is_jmp = DISAS_NORETURN;
3579     return nullify_end(ctx);
3580 #endif
3581 }
3582 
3583 /*
3584  * Float class 0
3585  */
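
/* fcpy simply moves bits, and fabs/fneg/fnegabs only touch the sign
   bit (note the unused env argument), so all four are expanded inline;
   fsqrt and frnd need the softfloat state and go through helpers.  */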
3586 
3587 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3588 {
3589     tcg_gen_mov_i32(dst, src);
3590 }
3591 
3592 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3593 {
3594     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3595 }
3596 
3597 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3598 {
3599     tcg_gen_mov_i64(dst, src);
3600 }
3601 
3602 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3603 {
3604     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3605 }
3606 
3607 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3608 {
3609     tcg_gen_andi_i32(dst, src, INT32_MAX);
3610 }
3611 
3612 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3613 {
3614     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3615 }
3616 
3617 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3618 {
3619     tcg_gen_andi_i64(dst, src, INT64_MAX);
3620 }
3621 
3622 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3623 {
3624     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3625 }
3626 
3627 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3628 {
3629     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3630 }
3631 
3632 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3633 {
3634     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3635 }
3636 
3637 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3638 {
3639     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3640 }
3641 
3642 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3643 {
3644     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3645 }
3646 
3647 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3648 {
3649     tcg_gen_xori_i32(dst, src, INT32_MIN);
3650 }
3651 
3652 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3653 {
3654     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3655 }
3656 
3657 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3658 {
3659     tcg_gen_xori_i64(dst, src, INT64_MIN);
3660 }
3661 
3662 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3663 {
3664     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3665 }
3666 
3667 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3668 {
3669     tcg_gen_ori_i32(dst, src, INT32_MIN);
3670 }
3671 
3672 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3673 {
3674     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3675 }
3676 
3677 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3678 {
3679     tcg_gen_ori_i64(dst, src, INT64_MIN);
3680 }
3681 
3682 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3683 {
3684     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3685 }
3686 
3687 /*
3688  * Float class 1
3689  */
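
/* In the helper names, w and dw are signed word and doubleword, uw and
   udw their unsigned counterparts, s and d the float formats; the _t_
   variants truncate toward zero rather than using the current
   rounding mode.  */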
3690 
3691 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3692 {
3693     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3694 }
3695 
3696 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3697 {
3698     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3699 }
3700 
3701 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3702 {
3703     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3704 }
3705 
3706 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3707 {
3708     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3709 }
3710 
3711 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3712 {
3713     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3714 }
3715 
3716 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3717 {
3718     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3719 }
3720 
3721 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3722 {
3723     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3724 }
3725 
3726 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3727 {
3728     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3729 }
3730 
3731 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3732 {
3733     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3734 }
3735 
3736 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3737 {
3738     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3739 }
3740 
3741 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3742 {
3743     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3744 }
3745 
3746 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3747 {
3748     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3749 }
3750 
3751 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3752 {
3753     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3754 }
3755 
3756 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3757 {
3758     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3759 }
3760 
3761 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3762 {
3763     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3764 }
3765 
3766 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3767 {
3768     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3769 }
3770 
3771 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3772 {
3773     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3774 }
3775 
3776 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3777 {
3778     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3779 }
3780 
3781 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3782 {
3783     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3784 }
3785 
3786 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3787 {
3788     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3789 }
3790 
3791 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3792 {
3793     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3794 }
3795 
3796 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3797 {
3798     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3799 }
3800 
3801 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3802 {
3803     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3804 }
3805 
3806 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3807 {
3808     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3809 }
3810 
3811 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3812 {
3813     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3814 }
3815 
3816 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3817 {
3818     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3819 }
3820 
3821 /*
3822  * Float class 2
3823  */
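
/* The comparisons set the status bits in FR0; FTEST below examines
   the fr0_shadow copy and converts the selected bits into a
   nullification condition.  */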
3824 
3825 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3826 {
3827     TCGv_i32 ta, tb, tc, ty;
3828 
3829     nullify_over(ctx);
3830 
3831     ta = load_frw0_i32(a->r1);
3832     tb = load_frw0_i32(a->r2);
3833     ty = tcg_constant_i32(a->y);
3834     tc = tcg_constant_i32(a->c);
3835 
3836     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3837 
3838     tcg_temp_free_i32(ta);
3839     tcg_temp_free_i32(tb);
3840 
3841     return nullify_end(ctx);
3842 }
3843 
3844 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3845 {
3846     TCGv_i64 ta, tb;
3847     TCGv_i32 tc, ty;
3848 
3849     nullify_over(ctx);
3850 
3851     ta = load_frd0(a->r1);
3852     tb = load_frd0(a->r2);
3853     ty = tcg_constant_i32(a->y);
3854     tc = tcg_constant_i32(a->c);
3855 
3856     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3857 
3858     tcg_temp_free_i64(ta);
3859     tcg_temp_free_i64(tb);
3860 
3861     return nullify_end(ctx);
3862 }
3863 
3864 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3865 {
3866     TCGv_reg t;
3867 
3868     nullify_over(ctx);
3869 
3870     t = get_temp(ctx);
3871     tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3872 
3873     if (a->y == 1) {
3874         int mask;
3875         bool inv = false;
3876 
3877         switch (a->c) {
3878         case 0: /* simple */
3879             tcg_gen_andi_reg(t, t, 0x4000000);
3880             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3881             goto done;
3882         case 2: /* rej */
3883             inv = true;
3884             /* fallthru */
3885         case 1: /* acc */
3886             mask = 0x43ff800;
3887             break;
3888         case 6: /* rej8 */
3889             inv = true;
3890             /* fallthru */
3891         case 5: /* acc8 */
3892             mask = 0x43f8000;
3893             break;
3894         case 9: /* acc6 */
3895             mask = 0x43e0000;
3896             break;
3897         case 13: /* acc4 */
3898             mask = 0x4380000;
3899             break;
3900         case 17: /* acc2 */
3901             mask = 0x4200000;
3902             break;
3903         default:
3904             gen_illegal(ctx);
3905             return true;
3906         }
3907         if (inv) {
3908             TCGv_reg c = load_const(ctx, mask);
3909             tcg_gen_or_reg(t, t, c);
3910             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3911         } else {
3912             tcg_gen_andi_reg(t, t, mask);
3913             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3914         }
3915     } else {
3916         unsigned cbit = (a->y ^ 1) - 1;
3917 
3918         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3919         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3920         tcg_temp_free(t);
3921     }
3922 
3923  done:
3924     return nullify_end(ctx);
3925 }
3926 
3927 /*
3928  * Float class 3
3929  */
3930 
3931 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3932 {
3933     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3934 }
3935 
3936 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3937 {
3938     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3939 }
3940 
3941 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3942 {
3943     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3944 }
3945 
3946 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3947 {
3948     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3949 }
3950 
3951 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3952 {
3953     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3954 }
3955 
3956 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3957 {
3958     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3959 }
3960 
3961 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3962 {
3963     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3964 }
3965 
3966 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3967 {
3968     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3969 }
3970 
3971 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3972 {
3973     TCGv_i64 x, y;
3974 
3975     nullify_over(ctx);
3976 
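    /* XMPYU: unsigned 32-bit multiply producing the full 64-bit
       product.  */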
3977     x = load_frw0_i64(a->r1);
3978     y = load_frw0_i64(a->r2);
3979     tcg_gen_mul_i64(x, x, y);
3980     save_frd(a->t, x);
3981     tcg_temp_free_i64(x);
3982     tcg_temp_free_i64(y);
3983 
3984     return nullify_end(ctx);
3985 }
3986 
3987 /* Convert the fmpyadd single-precision register encodings to standard.  */
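/* The 5-bit encoded value maps 0..15 to 16..31 and 16..31 to 48..63,
   e.g. 5 -> 21 and 21 -> 53.  */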
3988 static inline int fmpyadd_s_reg(unsigned r)
3989 {
3990     return (r & 16) * 2 + 16 + (r & 15);
3991 }
3992 
3993 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3994 {
3995     int tm = fmpyadd_s_reg(a->tm);
3996     int ra = fmpyadd_s_reg(a->ra);
3997     int ta = fmpyadd_s_reg(a->ta);
3998     int rm2 = fmpyadd_s_reg(a->rm2);
3999     int rm1 = fmpyadd_s_reg(a->rm1);
4000 
4001     nullify_over(ctx);
4002 
4003     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4004     do_fop_weww(ctx, ta, ta, ra,
4005                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4006 
4007     return nullify_end(ctx);
4008 }
4009 
4010 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4011 {
4012     return do_fmpyadd_s(ctx, a, false);
4013 }
4014 
4015 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4016 {
4017     return do_fmpyadd_s(ctx, a, true);
4018 }
4019 
4020 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4021 {
4022     nullify_over(ctx);
4023 
4024     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4025     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4026                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4027 
4028     return nullify_end(ctx);
4029 }
4030 
4031 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4032 {
4033     return do_fmpyadd_d(ctx, a, false);
4034 }
4035 
4036 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4037 {
4038     return do_fmpyadd_d(ctx, a, true);
4039 }
4040 
4041 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4042 {
4043     TCGv_i32 x, y, z;
4044 
4045     nullify_over(ctx);
4046     x = load_frw0_i32(a->rm1);
4047     y = load_frw0_i32(a->rm2);
4048     z = load_frw0_i32(a->ra3);
4049 
4050     if (a->neg) {
4051         gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4052     } else {
4053         gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4054     }
4055 
4056     tcg_temp_free_i32(y);
4057     tcg_temp_free_i32(z);
4058     save_frw_i32(a->t, x);
4059     tcg_temp_free_i32(x);
4060     return nullify_end(ctx);
4061 }
4062 
4063 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4064 {
4065     TCGv_i64 x, y, z;
4066 
4067     nullify_over(ctx);
4068     x = load_frd0(a->rm1);
4069     y = load_frd0(a->rm2);
4070     z = load_frd0(a->ra3);
4071 
4072     if (a->neg) {
4073         gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4074     } else {
4075         gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4076     }
4077 
4078     tcg_temp_free_i64(y);
4079     tcg_temp_free_i64(z);
4080     save_frd(a->t, x);
4081     tcg_temp_free_i64(x);
4082     return nullify_end(ctx);
4083 }
4084 
4085 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4086 {
4087     qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4088     cond_free(&ctx->null_cond);
4089     return true;
4090 }
4091 
4092 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4093 {
4094     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4095     int bound;
4096 
4097     ctx->cs = cs;
4098     ctx->tb_flags = ctx->base.tb->flags;
4099 
4100 #ifdef CONFIG_USER_ONLY
4101     ctx->privilege = MMU_USER_IDX;
4102     ctx->mmu_idx = MMU_USER_IDX;
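    /* Keep the (constant) user privilege level in the low bits of the
       IAOQ, mirroring the system-mode encoding below.  */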
4103     ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4104     ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4105 #else
4106     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4107     ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4108 
4109     /* Recover the IAOQ values from the GVA + PRIV.  */
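    /* The high 32 bits of cs_base hold the F-space; the low 32 bits
       hold the signed displacement IAOQ_B - IAOQ_F, where 0 means the
       back of the queue is unknown.  */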
4110     uint64_t cs_base = ctx->base.tb->cs_base;
4111     uint64_t iasq_f = cs_base & ~0xffffffffull;
4112     int32_t diff = cs_base;
4113 
4114     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4115     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4116 #endif
4117     ctx->iaoq_n = -1;
4118     ctx->iaoq_n_var = NULL;
4119 
4120     /* Bound the number of instructions by those left on the page.  */
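    /* E.g. with 4 KiB pages, a pc_first 8 bytes before the end of the
       page gives (pc_first | TARGET_PAGE_MASK) == -8, hence bound == 2.  */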
4121     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4122     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4123 
4124     ctx->ntempr = 0;
4125     ctx->ntempl = 0;
4126     memset(ctx->tempr, 0, sizeof(ctx->tempr));
4127     memset(ctx->templ, 0, sizeof(ctx->templ));
4128 }
4129 
4130 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4131 {
4132     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4133 
4134     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4135     ctx->null_cond = cond_make_f();
4136     ctx->psw_n_nonzero = false;
4137     if (ctx->tb_flags & PSW_N) {
4138         ctx->null_cond.c = TCG_COND_ALWAYS;
4139         ctx->psw_n_nonzero = true;
4140     }
4141     ctx->null_lab = NULL;
4142 }
4143 
4144 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4145 {
4146     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4147 
4148     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4149 }
4150 
4151 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4152 {
4153     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4154     CPUHPPAState *env = cs->env_ptr;
4155     DisasJumpType ret;
4156     int i, n;
4157 
4158     /* Execute one insn.  */
4159 #ifdef CONFIG_USER_ONLY
4160     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4161         do_page_zero(ctx);
4162         ret = ctx->base.is_jmp;
4163         assert(ret != DISAS_NEXT);
4164     } else
4165 #endif
4166     {
4167         /* Always fetch the insn, even if nullified, so that we check
4168            the page permissions for execute.  */
4169         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4170 
4171         /* Set up the IA queue for the next insn.
4172            This will be overwritten by a branch.  */
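        /* An unknown IAOQ_B (-1) can only be resolved at runtime, so
           IAOQ_N must be computed dynamically from cpu_iaoq_b;
           otherwise it is a translation-time constant.  */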
4173         if (ctx->iaoq_b == -1) {
4174             ctx->iaoq_n = -1;
4175             ctx->iaoq_n_var = get_temp(ctx);
4176             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4177         } else {
4178             ctx->iaoq_n = ctx->iaoq_b + 4;
4179             ctx->iaoq_n_var = NULL;
4180         }
4181 
4182         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4183             ctx->null_cond.c = TCG_COND_NEVER;
4184             ret = DISAS_NEXT;
4185         } else {
4186             ctx->insn = insn;
4187             if (!decode(ctx, insn)) {
4188                 gen_illegal(ctx);
4189             }
4190             ret = ctx->base.is_jmp;
4191             assert(ctx->null_lab == NULL);
4192         }
4193     }
4194 
4195     /* Free any temporaries allocated.  */
4196     for (i = 0, n = ctx->ntempr; i < n; ++i) {
4197         tcg_temp_free(ctx->tempr[i]);
4198         ctx->tempr[i] = NULL;
4199     }
4200     for (i = 0, n = ctx->ntempl; i < n; ++i) {
4201         tcg_temp_free_tl(ctx->templ[i]);
4202         ctx->templ[i] = NULL;
4203     }
4204     ctx->ntempr = 0;
4205     ctx->ntempl = 0;
4206 
4207     /* Advance the insn queue.  Note that this check also detects
4208        a privilege change within the instruction queue.  */
4209     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4210         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4211             && use_goto_tb(ctx, ctx->iaoq_b)
4212             && (ctx->null_cond.c == TCG_COND_NEVER
4213                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4214             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4215             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4216             ctx->base.is_jmp = ret = DISAS_NORETURN;
4217         } else {
4218             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4219         }
4220     }
4221     ctx->iaoq_f = ctx->iaoq_b;
4222     ctx->iaoq_b = ctx->iaoq_n;
4223     ctx->base.pc_next += 4;
4224 
4225     switch (ret) {
4226     case DISAS_NORETURN:
4227     case DISAS_IAQ_N_UPDATED:
4228         break;
4229 
4230     case DISAS_NEXT:
4231     case DISAS_IAQ_N_STALE:
4232     case DISAS_IAQ_N_STALE_EXIT:
4233         if (ctx->iaoq_f == -1) {
4234             tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4235             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4236 #ifndef CONFIG_USER_ONLY
4237             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4238 #endif
4239             nullify_save(ctx);
4240             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4241                                 ? DISAS_EXIT
4242                                 : DISAS_IAQ_N_UPDATED);
4243         } else if (ctx->iaoq_b == -1) {
4244             tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4245         }
4246         break;
4247 
4248     default:
4249         g_assert_not_reached();
4250     }
4251 }
4252 
4253 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4254 {
4255     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4256     DisasJumpType is_jmp = ctx->base.is_jmp;
4257 
4258     switch (is_jmp) {
4259     case DISAS_NORETURN:
4260         break;
4261     case DISAS_TOO_MANY:
4262     case DISAS_IAQ_N_STALE:
4263     case DISAS_IAQ_N_STALE_EXIT:
4264         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4265         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4266         nullify_save(ctx);
4267         /* FALLTHRU */
4268     case DISAS_IAQ_N_UPDATED:
4269         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4270             tcg_gen_lookup_and_goto_ptr();
4271             break;
4272         }
4273         /* FALLTHRU */
4274     case DISAS_EXIT:
4275         tcg_gen_exit_tb(NULL, 0);
4276         break;
4277     default:
4278         g_assert_not_reached();
4279     }
4280 }
4281 
4282 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4283 {
4284     target_ulong pc = dcbase->pc_first;
4285 
4286 #ifdef CONFIG_USER_ONLY
4287     switch (pc) {
4288     case 0x00:
4289         qemu_log("IN:\n0x00000000:  (null)\n");
4290         return;
4291     case 0xb0:
4292         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4293         return;
4294     case 0xe0:
4295         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4296         return;
4297     case 0x100:
4298         qemu_log("IN:\n0x00000100:  syscall\n");
4299         return;
4300     }
4301 #endif
4302 
4303     qemu_log("IN: %s\n", lookup_symbol(pc));
4304     log_target_disas(cs, pc, dcbase->tb->size);
4305 }
4306 
4307 static const TranslatorOps hppa_tr_ops = {
4308     .init_disas_context = hppa_tr_init_disas_context,
4309     .tb_start           = hppa_tr_tb_start,
4310     .insn_start         = hppa_tr_insn_start,
4311     .translate_insn     = hppa_tr_translate_insn,
4312     .tb_stop            = hppa_tr_tb_stop,
4313     .disas_log          = hppa_tr_disas_log,
4314 };
4315 
4316 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
4317 {
4318     DisasContext ctx;
4319     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
4320 }
4321 
4322 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4323                           target_ulong *data)
4324 {
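    /* data[] matches the arguments of tcg_gen_insn_start: data[0] is
       IAOQ_F and data[1] is IAOQ_B, or -1 if the latter is unknown.  */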
4325     env->iaoq_f = data[0];
4326     if (data[1] != (target_ureg)-1) {
4327         env->iaoq_b = data[1];
4328     }
4329     /* Since we were executing the instruction at IAOQ_F, and took some
4330        sort of action that provoked the cpu_restore_state, we can infer
4331        that the instruction was not nullified.  */
4332     env->psw_n = 0;
4333 }
4334