/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_reg_new   tcg_global_reg_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_reg_new   tcg_global_reg_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

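/*
 * DisasCond describes a comparison "a0 <c> a1" that has not yet been
 * materialized.  a0_is_n means that a0 aliases the cpu_psw_n global
 * rather than a free temporary; a1_is_0 means that a1 stands for the
 * constant zero, which cond_prep() materializes on demand.
 */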
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is encoded inverted, so that a decoded value
   of 0 selects sr0 explicitly rather than a space inferred from the base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
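/* That is: m=0 -> 0 (no base update); m=1,a=0 -> 1 (post-modify);
   m=1,a=1 -> -1 (pre-modify).  */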
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1_is_0 = true
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

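/*
 * Copy T into DEST, unless the current insn is nullified, in which case
 * DEST is left unchanged: the write-back is folded into a movcond on
 * the pending null condition.
 */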
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

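/*
 * The 32-bit fp registers are accessed as halves of the 64-bit fr[]
 * slots; HI_OFS/LO_OFS above select the correct half for the host's
 * endianness, with bit 5 of RT choosing the left or right word.
 */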
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fall-through path to
   other code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
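
/*
 * Illustrative usage pattern for nullify_over/nullify_end:
 *
 *     nullify_over(ctx);              skip the body if nullified
 *     ... emit the operation ...
 *     return nullify_end(ctx);        close the skip label; always true
 */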

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb for page crossing, I/O, or single-stepping.  */
    return !(((ctx->base.pc_first ^ dest) & TARGET_PAGE_MASK)
             || (tb_cflags(ctx->base.tb) & CF_LAST_IO)
             || ctx->base.singlestep_enabled);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}
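
/*
 * Note: the argument is CF >> 1 from the instruction's condition field;
 * values 2, 3 and 6 (<, <=, SV) require the signed-overflow input, and
 * values 4 and 5 (NUV, ZNV) require the carry input; see do_cond below.
 */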

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
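        /*
         * -C is zero when the carry is clear and all-ones when set,
         * so (-C & res) == 0 computes exactly !C | Z.
         */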
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
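        /* What is computed is the per-bit carry-out vector
         * cb = (in1 & in2) | ((in1 | in2) & ~res): bit B is set when
         * both inputs had bit B set, or at least one did and the
         * corresponding result bit came out clear.
         */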
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
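        /* For example (illustrative), res = 0x12003456:
         *   res - 0x01010101   = 0x10ff3355
         *   ... & ~res         = 0x00ff0301
         *   ... & 0x80808080   = 0x00800000 != 0, flagging the 0x00 byte.
         */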
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
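
/*
 * In both overflow helpers only the sign bit of SV is meaningful: for
 * addition it is set when the operands have equal signs but the result's
 * sign differs; the subtraction variant below instead requires the
 * operands' signs to differ.
 */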

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

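/*
 * A sketch of the flags handled here (see the callers below): IS_L is
 * the "logical" add that does not write back the carry bits; IS_TSV
 * traps on signed overflow; IS_TC traps when the condition is
 * satisfied; IS_C adds in the PSW carry bit; SHIFT pre-shifts IN1 for
 * the shift-and-add forms.
 */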
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

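    /*
     * When the carry-out is needed, perform the addition as a
     * double-word add2 with zero high parts, so that cb_msb receives
     * the carry out of the addition; the per-bit carry vector is then
     * recovered as in1 ^ in2 ^ dest.
     */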
    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
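        /* The high result of the double-word subtraction is 1 - borrow,
           which is precisely the PA carry/borrow bit C; the per-bit
           carries are then recovered as ~(in1 ^ in2) ^ dest.  */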
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

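/*
 * Form the guest virtual address for a memory operation.  *POFS receives
 * the (possibly scaled-index or displaced) offset value, which is what
 * gets written back on base modification; *PGVA receives the full
 * address, which in system mode also folds the selected space into the
 * high bits and applies the PSW[W] address mask.
 */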
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

1707 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1708                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1709 {
1710     TCGv_i32 tmp;
1711 
1712     nullify_over(ctx);
1713     tmp = load_frw0_i32(ra);
1714 
1715     func(tmp, cpu_env, tmp);
1716 
1717     save_frw_i32(rt, tmp);
1718     tcg_temp_free_i32(tmp);
1719     return nullify_end(ctx);
1720 }
1721 
1722 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1723                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1724 {
1725     TCGv_i32 dst;
1726     TCGv_i64 src;
1727 
1728     nullify_over(ctx);
1729     src = load_frd(ra);
1730     dst = tcg_temp_new_i32();
1731 
1732     func(dst, cpu_env, src);
1733 
1734     tcg_temp_free_i64(src);
1735     save_frw_i32(rt, dst);
1736     tcg_temp_free_i32(dst);
1737     return nullify_end(ctx);
1738 }
1739 
1740 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1741                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1742 {
1743     TCGv_i64 tmp;
1744 
1745     nullify_over(ctx);
1746     tmp = load_frd0(ra);
1747 
1748     func(tmp, cpu_env, tmp);
1749 
1750     save_frd(rt, tmp);
1751     tcg_temp_free_i64(tmp);
1752     return nullify_end(ctx);
1753 }
1754 
1755 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1756                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1757 {
1758     TCGv_i32 src;
1759     TCGv_i64 dst;
1760 
1761     nullify_over(ctx);
1762     src = load_frw0_i32(ra);
1763     dst = tcg_temp_new_i64();
1764 
1765     func(dst, cpu_env, src);
1766 
1767     tcg_temp_free_i32(src);
1768     save_frd(rt, dst);
1769     tcg_temp_free_i64(dst);
1770     return nullify_end(ctx);
1771 }
1772 
1773 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1774                         unsigned ra, unsigned rb,
1775                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1776 {
1777     TCGv_i32 a, b;
1778 
1779     nullify_over(ctx);
1780     a = load_frw0_i32(ra);
1781     b = load_frw0_i32(rb);
1782 
1783     func(a, cpu_env, a, b);
1784 
1785     tcg_temp_free_i32(b);
1786     save_frw_i32(rt, a);
1787     tcg_temp_free_i32(a);
1788     return nullify_end(ctx);
1789 }
1790 
1791 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1792                         unsigned ra, unsigned rb,
1793                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1794 {
1795     TCGv_i64 a, b;
1796 
1797     nullify_over(ctx);
1798     a = load_frd0(ra);
1799     b = load_frd0(rb);
1800 
1801     func(a, cpu_env, a, b);
1802 
1803     tcg_temp_free_i64(b);
1804     save_frd(rt, a);
1805     tcg_temp_free_i64(a);
1806     return nullify_end(ctx);
1807 }
1808 
1809 /* Emit an unconditional branch to a direct target, which may or may not
1810    have already had nullification handled.  */
1811 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1812                        unsigned link, bool is_n)
1813 {
1814     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1815         if (link != 0) {
1816             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1817         }
1818         ctx->iaoq_n = dest;
1819         if (is_n) {
1820             ctx->null_cond.c = TCG_COND_ALWAYS;
1821         }
1822     } else {
1823         nullify_over(ctx);
1824 
1825         if (link != 0) {
1826             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1827         }
1828 
1829         if (is_n && use_nullify_skip(ctx)) {
1830             nullify_set(ctx, 0);
1831             gen_goto_tb(ctx, 0, dest, dest + 4);
1832         } else {
1833             nullify_set(ctx, is_n);
1834             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1835         }
1836 
1837         nullify_end(ctx);
1838 
1839         nullify_set(ctx, 0);
1840         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1841         ctx->base.is_jmp = DISAS_NORETURN;
1842     }
1843     return true;
1844 }
1845 
1846 /* Emit a conditional branch to a direct target.  If the branch itself
1847    is nullified, we should have already used nullify_over.  */
1848 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1849                        DisasCond *cond)
1850 {
1851     target_ureg dest = iaoq_dest(ctx, disp);
1852     TCGLabel *taken = NULL;
1853     TCGCond c = cond->c;
1854     bool n;
1855 
1856     assert(ctx->null_cond.c == TCG_COND_NEVER);
1857 
1858     /* Handle TRUE and NEVER as direct branches.  */
1859     if (c == TCG_COND_ALWAYS) {
1860         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1861     }
1862     if (c == TCG_COND_NEVER) {
1863         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1864     }
1865 
1866     taken = gen_new_label();
1867     cond_prep(cond);
1868     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1869     cond_free(cond);
1870 
1871     /* Not taken: Condition not satisfied; nullify on backward branches. */
1872     n = is_n && disp < 0;
1873     if (n && use_nullify_skip(ctx)) {
1874         nullify_set(ctx, 0);
1875         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1876     } else {
1877         if (!n && ctx->null_lab) {
1878             gen_set_label(ctx->null_lab);
1879             ctx->null_lab = NULL;
1880         }
1881         nullify_set(ctx, n);
1882         if (ctx->iaoq_n == -1) {
1883             /* The temporary iaoq_n_var died at the branch above.
1884                Regenerate it here instead of saving it.  */
1885             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1886         }
1887         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1888     }
1889 
1890     gen_set_label(taken);
1891 
1892     /* Taken: Condition satisfied; nullify on forward branches.  */
1893     n = is_n && disp >= 0;
1894     if (n && use_nullify_skip(ctx)) {
1895         nullify_set(ctx, 0);
1896         gen_goto_tb(ctx, 1, dest, dest + 4);
1897     } else {
1898         nullify_set(ctx, n);
1899         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1900     }
1901 
1902     /* Not taken: the branch itself was nullified.  */
1903     if (ctx->null_lab) {
1904         gen_set_label(ctx->null_lab);
1905         ctx->null_lab = NULL;
1906         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1907     } else {
1908         ctx->base.is_jmp = DISAS_NORETURN;
1909     }
1910     return true;
1911 }
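
/* Recap of the nullification convention encoded above, for the ,n
   completer on conditional branches: a forward branch nullifies the
   delay slot when taken, a backward branch when not taken.  So for a
   typical loop such as
       cmpb,<>,n  r3,r4,loop
   (disp < 0), the delay-slot insn executes on every iteration and is
   skipped only on loop exit.  */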
1912 
1913 /* Emit an unconditional branch to an indirect target.  This handles
1914    nullification of the branch itself.  */
1915 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1916                        unsigned link, bool is_n)
1917 {
1918     TCGv_reg a0, a1, next, tmp;
1919     TCGCond c;
1920 
1921     assert(ctx->null_lab == NULL);
1922 
1923     if (ctx->null_cond.c == TCG_COND_NEVER) {
1924         if (link != 0) {
1925             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1926         }
1927         next = get_temp(ctx);
1928         tcg_gen_mov_reg(next, dest);
1929         if (is_n) {
1930             if (use_nullify_skip(ctx)) {
1931                 tcg_gen_mov_reg(cpu_iaoq_f, next);
1932                 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1933                 nullify_set(ctx, 0);
1934                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1935                 return true;
1936             }
1937             ctx->null_cond.c = TCG_COND_ALWAYS;
1938         }
1939         ctx->iaoq_n = -1;
1940         ctx->iaoq_n_var = next;
1941     } else if (is_n && use_nullify_skip(ctx)) {
1942         /* The (conditional) branch, B, nullifies the next insn, N,
1943        and we're allowed to skip execution of N (no single-step or
1944            tracepoint in effect).  Since the goto_ptr that we must use
1945            for the indirect branch consumes no special resources, we
1946            can (conditionally) skip B and continue execution.  */
1947         /* The use_nullify_skip test implies we have a known control path.  */
1948         tcg_debug_assert(ctx->iaoq_b != -1);
1949         tcg_debug_assert(ctx->iaoq_n != -1);
1950 
1951         /* We do have to handle the non-local temporary, DEST, before
1952        branching.  Since IAOQ_F is not really live at this point, we
1953            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1954         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1955         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1956 
1957         nullify_over(ctx);
1958         if (link != 0) {
1959             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1960         }
1961         tcg_gen_lookup_and_goto_ptr();
1962         return nullify_end(ctx);
1963     } else {
1964         cond_prep(&ctx->null_cond);
1965         c = ctx->null_cond.c;
1966         a0 = ctx->null_cond.a0;
1967         a1 = ctx->null_cond.a1;
1968 
1969         tmp = tcg_temp_new();
1970         next = get_temp(ctx);
1971 
1972         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1973         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1974         ctx->iaoq_n = -1;
1975         ctx->iaoq_n_var = next;
1976 
1977         if (link != 0) {
1978             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1979         }
1980 
1981         if (is_n) {
1982             /* The branch nullifies the next insn, which means the state of N
1983                after the branch is the inverse of the state of N that applied
1984                to the branch.  */
1985             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1986             cond_free(&ctx->null_cond);
1987             ctx->null_cond = cond_make_n();
1988             ctx->psw_n_nonzero = true;
1989         } else {
1990             cond_free(&ctx->null_cond);
1991         }
1992     }
1993     return true;
1994 }
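
/* In the final (conditional, non-skippable) case above, the branch
   target cannot be resolved at translate time, so the next queue entry
   itself becomes data: NEXT = nullified ? IAOQ_N : DEST, with the link
   register likewise written only on the executed path.  Marking
   iaoq_n = -1 then forces an indirect TB exit later.  */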
1995 
1996 /* Implement
1997  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1998  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1999  *    else
2000  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2001  * which keeps the privilege level from being increased.
2002  */
2003 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
2004 {
2005     TCGv_reg dest;
2006     switch (ctx->privilege) {
2007     case 0:
2008         /* Privilege 0 is maximum and is allowed to decrease.  */
2009         return offset;
2010     case 3:
2011         /* Privilege 3 is minimum and is never allowed to increase.  */
2012         dest = get_temp(ctx);
2013         tcg_gen_ori_reg(dest, offset, 3);
2014         break;
2015     default:
2016         dest = get_temp(ctx);
2017         tcg_gen_andi_reg(dest, offset, -4);
2018         tcg_gen_ori_reg(dest, dest, ctx->privilege);
2019         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
2020         break;
2021     }
2022     return dest;
2023 }
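
/* Worked example (illustrative): at privilege level 2, a branch target
   of 0x1001 would raise privilege to 1; the default case computes
   dest = (0x1001 & -4) | 2 = 0x1002 and the GTU movcond keeps it.
   A target of 0x1003 (privilege 3, a decrease) compares higher than
   dest and is used unchanged.  */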
2024 
2025 #ifdef CONFIG_USER_ONLY
2026 /* On Linux, page zero is normally marked execute only + gateway.
2027    Therefore normal read or write is supposed to fail, but specific
2028    offsets have kernel code mapped to raise permissions to implement
2029    system calls.  Handling this via an explicit check here, rather
2030    in than the "be disp(sr2,r0)" instruction that probably sent us
2031    than in the "be disp(sr2,r0)" instruction that probably sent us
2032    aforementioned BE.  */
2033 static void do_page_zero(DisasContext *ctx)
2034 {
2035     /* If by some means we get here with PSW[N]=1, that implies that
2036        the B,GATE instruction would be skipped, and we'd fault on the
2037        next insn within the privilaged page.  */
2038       next insn within the privileged page.  */
2039     case TCG_COND_NEVER:
2040         break;
2041     case TCG_COND_ALWAYS:
2042         tcg_gen_movi_reg(cpu_psw_n, 0);
2043         goto do_sigill;
2044     default:
2045         /* Since this is always the first (and only) insn within the
2046            TB, we should know the state of PSW[N] from TB->FLAGS.  */
2047         g_assert_not_reached();
2048     }
2049 
2050     /* Check that we didn't arrive here via some means that allowed
2051        non-sequential instruction execution.  Normally the PSW[B] bit
2052       detects this by preventing the B,GATE instruction from executing
2053       under such conditions.  */
2054     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2055         goto do_sigill;
2056     }
2057 
2058     switch (ctx->iaoq_f & -4) {
2059     case 0x00: /* Null pointer call */
2060         gen_excp_1(EXCP_IMP);
2061         ctx->base.is_jmp = DISAS_NORETURN;
2062         break;
2063 
2064     case 0xb0: /* LWS */
2065         gen_excp_1(EXCP_SYSCALL_LWS);
2066         ctx->base.is_jmp = DISAS_NORETURN;
2067         break;
2068 
2069     case 0xe0: /* SET_THREAD_POINTER */
2070         tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2071         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2072         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2073         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2074         break;
2075 
2076     case 0x100: /* SYSCALL */
2077         gen_excp_1(EXCP_SYSCALL);
2078         ctx->base.is_jmp = DISAS_NORETURN;
2079         break;
2080 
2081     default:
2082     do_sigill:
2083         gen_excp_1(EXCP_ILL);
2084         ctx->base.is_jmp = DISAS_NORETURN;
2085         break;
2086     }
2087 }
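
/* For reference: these offsets mirror the Linux/hppa gateway page,
   which user code enters with e.g. "ble 0x100(%sr2, %r0)" for an
   ordinary syscall; the explicit dispatch above stands in for the
   kernel code that would otherwise be mapped at those addresses.  */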
2088 #endif
2089 
2090 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2091 {
2092     cond_free(&ctx->null_cond);
2093     return true;
2094 }
2095 
2096 static bool trans_break(DisasContext *ctx, arg_break *a)
2097 {
2098     return gen_excp_iir(ctx, EXCP_BREAK);
2099 }
2100 
2101 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2102 {
2103     /* No point in nullifying the memory barrier.  */
2104     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2105 
2106     cond_free(&ctx->null_cond);
2107     return true;
2108 }
2109 
2110 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2111 {
2112     unsigned rt = a->t;
2113     TCGv_reg tmp = dest_gpr(ctx, rt);
2114     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2115     save_gpr(ctx, rt, tmp);
2116 
2117     cond_free(&ctx->null_cond);
2118     return true;
2119 }
2120 
2121 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2122 {
2123     unsigned rt = a->t;
2124     unsigned rs = a->sp;
2125     TCGv_i64 t0 = tcg_temp_new_i64();
2126     TCGv_reg t1 = tcg_temp_new();
2127 
2128     load_spr(ctx, t0, rs);
2129     tcg_gen_shri_i64(t0, t0, 32);
2130     tcg_gen_trunc_i64_reg(t1, t0);
2131 
2132     save_gpr(ctx, rt, t1);
2133     tcg_temp_free(t1);
2134     tcg_temp_free_i64(t0);
2135 
2136     cond_free(&ctx->null_cond);
2137     return true;
2138 }
2139 
2140 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2141 {
2142     unsigned rt = a->t;
2143     unsigned ctl = a->r;
2144     TCGv_reg tmp;
2145 
2146     switch (ctl) {
2147     case CR_SAR:
2148 #ifdef TARGET_HPPA64
2149         if (a->e == 0) {
2150             /* MFSAR without ,W masks low 5 bits.  */
2151             tmp = dest_gpr(ctx, rt);
2152             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2153             save_gpr(ctx, rt, tmp);
2154             goto done;
2155         }
2156 #endif
2157         save_gpr(ctx, rt, cpu_sar);
2158         goto done;
2159     case CR_IT: /* Interval Timer */
2160         /* FIXME: Respect PSW_S bit.  */
2161         nullify_over(ctx);
2162         tmp = dest_gpr(ctx, rt);
2163         if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2164             gen_io_start();
2165             gen_helper_read_interval_timer(tmp);
2166             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2167         } else {
2168             gen_helper_read_interval_timer(tmp);
2169         }
2170         save_gpr(ctx, rt, tmp);
2171         return nullify_end(ctx);
2172     case 26:
2173     case 27:
2174         break;
2175     default:
2176         /* All other control registers are privileged.  */
2177         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2178         break;
2179     }
2180 
2181     tmp = get_temp(ctx);
2182     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2183     save_gpr(ctx, rt, tmp);
2184 
2185  done:
2186     cond_free(&ctx->null_cond);
2187     return true;
2188 }
2189 
2190 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2191 {
2192     unsigned rr = a->r;
2193     unsigned rs = a->sp;
2194     TCGv_i64 t64;
2195 
2196     if (rs >= 5) {
2197         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2198     }
2199     nullify_over(ctx);
2200 
2201     t64 = tcg_temp_new_i64();
2202     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2203     tcg_gen_shli_i64(t64, t64, 32);
2204 
2205     if (rs >= 4) {
2206         tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2207         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2208     } else {
2209         tcg_gen_mov_i64(cpu_sr[rs], t64);
2210     }
2211     tcg_temp_free_i64(t64);
2212 
2213     return nullify_end(ctx);
2214 }
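
/* Design note (as suggested by the shifts here and in trans_mfsp):
   space registers are kept as i64 values with the space id in the
   high 32 bits, so that OR-ing in a 32-bit offset directly yields the
   full 64-bit global virtual address used when forming addresses.  */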
2215 
2216 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2217 {
2218     unsigned ctl = a->t;
2219     TCGv_reg reg;
2220     TCGv_reg tmp;
2221 
2222     if (ctl == CR_SAR) {
2223         reg = load_gpr(ctx, a->r);
2224         tmp = tcg_temp_new();
2225         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2226         save_or_nullify(ctx, cpu_sar, tmp);
2227         tcg_temp_free(tmp);
2228 
2229         cond_free(&ctx->null_cond);
2230         return true;
2231     }
2232 
2233     /* All other control registers are privileged or read-only.  */
2234     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2235 
2236 #ifndef CONFIG_USER_ONLY
2237     nullify_over(ctx);
2238     reg = load_gpr(ctx, a->r);
2239 
2240     switch (ctl) {
2241     case CR_IT:
2242         gen_helper_write_interval_timer(cpu_env, reg);
2243         break;
2244     case CR_EIRR:
2245         gen_helper_write_eirr(cpu_env, reg);
2246         break;
2247     case CR_EIEM:
2248         gen_helper_write_eiem(cpu_env, reg);
2249         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2250         break;
2251 
2252     case CR_IIASQ:
2253     case CR_IIAOQ:
2254         /* FIXME: Respect PSW_Q bit */
2255         /* The write advances the queue and stores to the back element.  */
2256         tmp = get_temp(ctx);
2257         tcg_gen_ld_reg(tmp, cpu_env,
2258                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2259         tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2260         tcg_gen_st_reg(reg, cpu_env,
2261                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2262         break;
2263 
2264     case CR_PID1:
2265     case CR_PID2:
2266     case CR_PID3:
2267     case CR_PID4:
2268         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2269 #ifndef CONFIG_USER_ONLY
2270         gen_helper_change_prot_id(cpu_env);
2271 #endif
2272         break;
2273 
2274     default:
2275         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2276         break;
2277     }
2278     return nullify_end(ctx);
2279 #endif
2280 }
2281 
2282 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2283 {
2284     TCGv_reg tmp = tcg_temp_new();
2285 
2286     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2287     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2288     save_or_nullify(ctx, cpu_sar, tmp);
2289     tcg_temp_free(tmp);
2290 
2291     cond_free(&ctx->null_cond);
2292     return true;
2293 }
2294 
2295 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2296 {
2297     TCGv_reg dest = dest_gpr(ctx, a->t);
2298 
2299 #ifdef CONFIG_USER_ONLY
2300     /* We don't implement space registers in user mode. */
2301     tcg_gen_movi_reg(dest, 0);
2302 #else
2303     TCGv_i64 t0 = tcg_temp_new_i64();
2304 
2305     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2306     tcg_gen_shri_i64(t0, t0, 32);
2307     tcg_gen_trunc_i64_reg(dest, t0);
2308 
2309     tcg_temp_free_i64(t0);
2310 #endif
2311     save_gpr(ctx, a->t, dest);
2312 
2313     cond_free(&ctx->null_cond);
2314     return true;
2315 }
2316 
2317 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2318 {
2319     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2320 #ifndef CONFIG_USER_ONLY
2321     TCGv_reg tmp;
2322 
2323     nullify_over(ctx);
2324 
2325     tmp = get_temp(ctx);
2326     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2327     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2328     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2329     save_gpr(ctx, a->t, tmp);
2330 
2331     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2332     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2333     return nullify_end(ctx);
2334 #endif
2335 }
2336 
2337 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2338 {
2339     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2340 #ifndef CONFIG_USER_ONLY
2341     TCGv_reg tmp;
2342 
2343     nullify_over(ctx);
2344 
2345     tmp = get_temp(ctx);
2346     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2347     tcg_gen_ori_reg(tmp, tmp, a->i);
2348     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2349     save_gpr(ctx, a->t, tmp);
2350 
2351     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2352     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2353     return nullify_end(ctx);
2354 #endif
2355 }
2356 
2357 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2358 {
2359     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2360 #ifndef CONFIG_USER_ONLY
2361     TCGv_reg tmp, reg;
2362     nullify_over(ctx);
2363 
2364     reg = load_gpr(ctx, a->r);
2365     tmp = get_temp(ctx);
2366     gen_helper_swap_system_mask(tmp, cpu_env, reg);
2367 
2368     /* Exit the TB to recognize new interrupts.  */
2369     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2370     return nullify_end(ctx);
2371 #endif
2372 }
2373 
2374 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2375 {
2376     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2377 #ifndef CONFIG_USER_ONLY
2378     nullify_over(ctx);
2379 
2380     if (rfi_r) {
2381         gen_helper_rfi_r(cpu_env);
2382     } else {
2383         gen_helper_rfi(cpu_env);
2384     }
2385     /* Exit the TB to recognize new interrupts.  */
2386     if (ctx->base.singlestep_enabled) {
2387         gen_excp_1(EXCP_DEBUG);
2388     } else {
2389         tcg_gen_exit_tb(NULL, 0);
2390     }
2391     ctx->base.is_jmp = DISAS_NORETURN;
2392 
2393     return nullify_end(ctx);
2394 #endif
2395 }
2396 
2397 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2398 {
2399     return do_rfi(ctx, false);
2400 }
2401 
2402 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2403 {
2404     return do_rfi(ctx, true);
2405 }
2406 
2407 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2408 {
2409     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2410 #ifndef CONFIG_USER_ONLY
2411     nullify_over(ctx);
2412     gen_helper_halt(cpu_env);
2413     ctx->base.is_jmp = DISAS_NORETURN;
2414     return nullify_end(ctx);
2415 #endif
2416 }
2417 
2418 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2419 {
2420     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2421 #ifndef CONFIG_USER_ONLY
2422     nullify_over(ctx);
2423     gen_helper_reset(cpu_env);
2424     ctx->base.is_jmp = DISAS_NORETURN;
2425     return nullify_end(ctx);
2426 #endif
2427 }
2428 
2429 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2430 {
2431     if (a->m) {
2432         TCGv_reg dest = dest_gpr(ctx, a->b);
2433         TCGv_reg src1 = load_gpr(ctx, a->b);
2434         TCGv_reg src2 = load_gpr(ctx, a->x);
2435 
2436         /* The only thing we need to do is the base register modification.  */
2437         tcg_gen_add_reg(dest, src1, src2);
2438         save_gpr(ctx, a->b, dest);
2439     }
2440     cond_free(&ctx->null_cond);
2441     return true;
2442 }
2443 
2444 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2445 {
2446     TCGv_reg dest, ofs;
2447     TCGv_i32 level, want;
2448     TCGv_tl addr;
2449 
2450     nullify_over(ctx);
2451 
2452     dest = dest_gpr(ctx, a->t);
2453     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2454 
2455     if (a->imm) {
2456         level = tcg_const_i32(a->ri);
2457     } else {
2458         level = tcg_temp_new_i32();
2459         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2460         tcg_gen_andi_i32(level, level, 3);
2461     }
2462     want = tcg_const_i32(a->write ? PAGE_WRITE : PAGE_READ);
2463 
2464     gen_helper_probe(dest, cpu_env, addr, level, want);
2465 
2466     tcg_temp_free_i32(want);
2467     tcg_temp_free_i32(level);
2468 
2469     save_gpr(ctx, a->t, dest);
2470     return nullify_end(ctx);
2471 }
2472 
2473 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2474 {
2475     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2476 #ifndef CONFIG_USER_ONLY
2477     TCGv_tl addr;
2478     TCGv_reg ofs, reg;
2479 
2480     nullify_over(ctx);
2481 
2482     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2483     reg = load_gpr(ctx, a->r);
2484     if (a->addr) {
2485         gen_helper_itlba(cpu_env, addr, reg);
2486     } else {
2487         gen_helper_itlbp(cpu_env, addr, reg);
2488     }
2489 
2490     /* Exit TB for TLB change if mmu is enabled.  */
2491     if (ctx->tb_flags & PSW_C) {
2492         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2493     }
2494     return nullify_end(ctx);
2495 #endif
2496 }
2497 
2498 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2499 {
2500     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2501 #ifndef CONFIG_USER_ONLY
2502     TCGv_tl addr;
2503     TCGv_reg ofs;
2504 
2505     nullify_over(ctx);
2506 
2507     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2508     if (a->m) {
2509         save_gpr(ctx, a->b, ofs);
2510     }
2511     if (a->local) {
2512         gen_helper_ptlbe(cpu_env);
2513     } else {
2514         gen_helper_ptlb(cpu_env, addr);
2515     }
2516 
2517     /* Exit TB for TLB change if mmu is enabled.  */
2518     if (ctx->tb_flags & PSW_C) {
2519         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2520     }
2521     return nullify_end(ctx);
2522 #endif
2523 }
2524 
2525 /*
2526  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2527  * See
2528  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2529  *     page 13-9 (195/206)
2530  */
2531 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2532 {
2533     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2534 #ifndef CONFIG_USER_ONLY
2535     TCGv_tl addr, atl, stl;
2536     TCGv_reg reg;
2537 
2538     nullify_over(ctx);
2539 
2540     /*
2541      * FIXME:
2542      *  if (not (pcxl or pcxl2))
2543      *    return gen_illegal(ctx);
2544      *
2545      * Note for future: these are 32-bit systems; no hppa64.
2546      */
2547 
2548     atl = tcg_temp_new_tl();
2549     stl = tcg_temp_new_tl();
2550     addr = tcg_temp_new_tl();
2551 
2552     tcg_gen_ld32u_i64(stl, cpu_env,
2553                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2554                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2555     tcg_gen_ld32u_i64(atl, cpu_env,
2556                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2557                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2558     tcg_gen_shli_i64(stl, stl, 32);
2559     tcg_gen_or_tl(addr, atl, stl);
2560     tcg_temp_free_tl(atl);
2561     tcg_temp_free_tl(stl);
2562 
2563     reg = load_gpr(ctx, a->r);
2564     if (a->addr) {
2565         gen_helper_itlba(cpu_env, addr, reg);
2566     } else {
2567         gen_helper_itlbp(cpu_env, addr, reg);
2568     }
2569     tcg_temp_free_tl(addr);
2570 
2571     /* Exit TB for TLB change if mmu is enabled.  */
2572     if (ctx->tb_flags & PSW_C) {
2573         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2574     }
2575     return nullify_end(ctx);
2576 #endif
2577 }
2578 
2579 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2580 {
2581     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2582 #ifndef CONFIG_USER_ONLY
2583     TCGv_tl vaddr;
2584     TCGv_reg ofs, paddr;
2585 
2586     nullify_over(ctx);
2587 
2588     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2589 
2590     paddr = tcg_temp_new();
2591     gen_helper_lpa(paddr, cpu_env, vaddr);
2592 
2593     /* Note that the physical address result overrides the base modification.  */
2594     if (a->m) {
2595         save_gpr(ctx, a->b, ofs);
2596     }
2597     save_gpr(ctx, a->t, paddr);
2598     tcg_temp_free(paddr);
2599 
2600     return nullify_end(ctx);
2601 #endif
2602 }
2603 
2604 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2605 {
2606     TCGv_reg ci;
2607 
2608     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2609 
2610     /* The Coherence Index is an implementation-defined function of the
2611        physical address.  Two addresses with the same CI have a coherent
2612        view of the cache.  Our implementation is to return 0 for all,
2613        since the entire address space is coherent.  */
2614     ci = tcg_const_reg(0);
2615     save_gpr(ctx, a->t, ci);
2616     tcg_temp_free(ci);
2617 
2618     cond_free(&ctx->null_cond);
2619     return true;
2620 }
2621 
2622 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2623 {
2624     return do_add_reg(ctx, a, false, false, false, false);
2625 }
2626 
2627 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2628 {
2629     return do_add_reg(ctx, a, true, false, false, false);
2630 }
2631 
2632 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2633 {
2634     return do_add_reg(ctx, a, false, true, false, false);
2635 }
2636 
2637 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2638 {
2639     return do_add_reg(ctx, a, false, false, false, true);
2640 }
2641 
2642 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2643 {
2644     return do_add_reg(ctx, a, false, true, false, true);
2645 }
2646 
2647 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2648 {
2649     return do_sub_reg(ctx, a, false, false, false);
2650 }
2651 
2652 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2653 {
2654     return do_sub_reg(ctx, a, true, false, false);
2655 }
2656 
2657 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2658 {
2659     return do_sub_reg(ctx, a, false, false, true);
2660 }
2661 
2662 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2663 {
2664     return do_sub_reg(ctx, a, true, false, true);
2665 }
2666 
2667 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2668 {
2669     return do_sub_reg(ctx, a, false, true, false);
2670 }
2671 
2672 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2673 {
2674     return do_sub_reg(ctx, a, true, true, false);
2675 }
2676 
2677 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2678 {
2679     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2680 }
2681 
2682 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2683 {
2684     return do_log_reg(ctx, a, tcg_gen_and_reg);
2685 }
2686 
2687 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2688 {
2689     if (a->cf == 0) {
2690         unsigned r2 = a->r2;
2691         unsigned r1 = a->r1;
2692         unsigned rt = a->t;
2693 
2694         if (rt == 0) { /* NOP */
2695             cond_free(&ctx->null_cond);
2696             return true;
2697         }
2698         if (r2 == 0) { /* COPY */
2699             if (r1 == 0) {
2700                 TCGv_reg dest = dest_gpr(ctx, rt);
2701                 tcg_gen_movi_reg(dest, 0);
2702                 save_gpr(ctx, rt, dest);
2703             } else {
2704                 save_gpr(ctx, rt, cpu_gr[r1]);
2705             }
2706             cond_free(&ctx->null_cond);
2707             return true;
2708         }
2709 #ifndef CONFIG_USER_ONLY
2710         /* These are QEMU extensions and are nops in the real architecture:
2711          *
2712          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2713          * or %r31,%r31,%r31 -- death loop; offline cpu
2714          *                      currently implemented as idle.
2715          */
2716         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2717             TCGv_i32 tmp;
2718 
2719             /* No need to check for supervisor, as userland can only pause
2720                until the next timer interrupt.  */
2721             nullify_over(ctx);
2722 
2723             /* Advance the instruction queue.  */
2724             copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2725             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2726             nullify_set(ctx, 0);
2727 
2728             /* Tell the qemu main loop to halt until this cpu has work.  */
2729             tmp = tcg_const_i32(1);
2730             tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2731                                          offsetof(CPUState, halted));
2732             tcg_temp_free_i32(tmp);
2733             gen_excp_1(EXCP_HALTED);
2734             ctx->base.is_jmp = DISAS_NORETURN;
2735 
2736             return nullify_end(ctx);
2737         }
2738 #endif
2739     }
2740     return do_log_reg(ctx, a, tcg_gen_or_reg);
2741 }
2742 
2743 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2744 {
2745     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2746 }
2747 
2748 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2749 {
2750     TCGv_reg tcg_r1, tcg_r2;
2751 
2752     if (a->cf) {
2753         nullify_over(ctx);
2754     }
2755     tcg_r1 = load_gpr(ctx, a->r1);
2756     tcg_r2 = load_gpr(ctx, a->r2);
2757     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2758     return nullify_end(ctx);
2759 }
2760 
2761 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2762 {
2763     TCGv_reg tcg_r1, tcg_r2;
2764 
2765     if (a->cf) {
2766         nullify_over(ctx);
2767     }
2768     tcg_r1 = load_gpr(ctx, a->r1);
2769     tcg_r2 = load_gpr(ctx, a->r2);
2770     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2771     return nullify_end(ctx);
2772 }
2773 
2774 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2775 {
2776     TCGv_reg tcg_r1, tcg_r2, tmp;
2777 
2778     if (a->cf) {
2779         nullify_over(ctx);
2780     }
2781     tcg_r1 = load_gpr(ctx, a->r1);
2782     tcg_r2 = load_gpr(ctx, a->r2);
2783     tmp = get_temp(ctx);
2784     tcg_gen_not_reg(tmp, tcg_r2);
2785     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2786     return nullify_end(ctx);
2787 }
2788 
2789 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2790 {
2791     return do_uaddcm(ctx, a, false);
2792 }
2793 
2794 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2795 {
2796     return do_uaddcm(ctx, a, true);
2797 }
2798 
2799 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2800 {
2801     TCGv_reg tmp;
2802 
2803     nullify_over(ctx);
2804 
2805     tmp = get_temp(ctx);
2806     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2807     if (!is_i) {
2808         tcg_gen_not_reg(tmp, tmp);
2809     }
2810     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2811     tcg_gen_muli_reg(tmp, tmp, 6);
2812     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2813             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2814     return nullify_end(ctx);
2815 }
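
/* Worked example (illustrative): BCD 19 + 01.  Guest code pre-biases
   one operand with 0x66666666, so the binary add yields
   0x6666667f + 1 = 0x66666680 with a carry out of nibble 0 only.
   DCOR then subtracts 6 from every nibble that did not carry:
   0x66666680 - 0x66666660 = 0x00000020, the correct BCD sum.  The
   shri/not/andi/muli sequence above builds that per-nibble 6
   directly from the saved carry bits.  */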
2816 
2817 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2818 {
2819     return do_dcor(ctx, a, false);
2820 }
2821 
2822 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2823 {
2824     return do_dcor(ctx, a, true);
2825 }
2826 
2827 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2828 {
2829     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2830 
2831     nullify_over(ctx);
2832 
2833     in1 = load_gpr(ctx, a->r1);
2834     in2 = load_gpr(ctx, a->r2);
2835 
2836     add1 = tcg_temp_new();
2837     add2 = tcg_temp_new();
2838     addc = tcg_temp_new();
2839     dest = tcg_temp_new();
2840     zero = tcg_const_reg(0);
2841 
2842     /* Form R1 << 1 | PSW[CB]{8}.  */
2843     tcg_gen_add_reg(add1, in1, in1);
2844     tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2845 
2846     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2847        carry{8} requires that we subtract via + ~R2 + 1, as described in
2848        the manual.  By extracting and masking V, we can produce the
2849        proper inputs to the addition without movcond.  */
2850     tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2851     tcg_gen_xor_reg(add2, in2, addc);
2852     tcg_gen_andi_reg(addc, addc, 1);
2853     /* ??? This is only correct for 32-bit.  */
2854     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2855     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2856 
2857     tcg_temp_free(addc);
2858     tcg_temp_free(zero);
2859 
2860     /* Write back the result register.  */
2861     save_gpr(ctx, a->t, dest);
2862 
2863     /* Write back PSW[CB].  */
2864     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2865     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2866 
2867     /* Write back PSW[V] for the division step.  */
2868     tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2869     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2870 
2871     /* Install the new nullification.  */
2872     if (a->cf) {
2873         TCGv_reg sv = NULL;
2874         if (cond_need_sv(a->cf >> 1)) {
2875             /* ??? The lshift is supposed to contribute to overflow.  */
2876             sv = do_add_sv(ctx, dest, add1, add2);
2877         }
2878         ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2879     }
2880 
2881     tcg_temp_free(add1);
2882     tcg_temp_free(add2);
2883     tcg_temp_free(dest);
2884 
2885     return nullify_end(ctx);
2886 }
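
/* Usage sketch: DS performs one step of a non-restoring divide.
   Division millicode seeds PSW[V] and PSW[CB] with an initial
   subtraction, then issues 32 DS steps, each consuming the flags
   produced by the previous one; hence the care taken above to write
   back PSW[CB], its msb, and PSW[V].  */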
2887 
2888 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2889 {
2890     return do_add_imm(ctx, a, false, false);
2891 }
2892 
2893 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2894 {
2895     return do_add_imm(ctx, a, true, false);
2896 }
2897 
2898 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2899 {
2900     return do_add_imm(ctx, a, false, true);
2901 }
2902 
2903 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2904 {
2905     return do_add_imm(ctx, a, true, true);
2906 }
2907 
2908 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2909 {
2910     return do_sub_imm(ctx, a, false);
2911 }
2912 
2913 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2914 {
2915     return do_sub_imm(ctx, a, true);
2916 }
2917 
2918 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2919 {
2920     TCGv_reg tcg_im, tcg_r2;
2921 
2922     if (a->cf) {
2923         nullify_over(ctx);
2924     }
2925 
2926     tcg_im = load_const(ctx, a->i);
2927     tcg_r2 = load_gpr(ctx, a->r);
2928     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2929 
2930     return nullify_end(ctx);
2931 }
2932 
2933 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2934 {
2935     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2936                    a->disp, a->sp, a->m, a->size | MO_TE);
2937 }
2938 
2939 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2940 {
2941     assert(a->x == 0 && a->scale == 0);
2942     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2943 }
2944 
2945 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2946 {
2947     MemOp mop = MO_TE | MO_ALIGN | a->size;
2948     TCGv_reg zero, dest, ofs;
2949     TCGv_tl addr;
2950 
2951     nullify_over(ctx);
2952 
2953     if (a->m) {
2954         /* Base register modification.  Make sure that if RT == RB,
2955            we see the result of the load.  */
2956         dest = get_temp(ctx);
2957     } else {
2958         dest = dest_gpr(ctx, a->t);
2959     }
2960 
2961     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2962              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2963 
2964     /*
2965      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2966      * However, actual hardware succeeds when aligned mod 4.
2967      * Detect this case and log a GUEST_ERROR.
2968      *
2969      * TODO: HPPA64 relaxes the over-alignment requirement
2970      * with the ,co completer.
2971      */
2972     gen_helper_ldc_check(addr);
2973 
2974     zero = tcg_const_reg(0);
2975     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2976     tcg_temp_free(zero);
2977 
2978     if (a->m) {
2979         save_gpr(ctx, a->b, ofs);
2980     }
2981     save_gpr(ctx, a->t, dest);
2982 
2983     return nullify_end(ctx);
2984 }
2985 
2986 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2987 {
2988     TCGv_reg ofs, val;
2989     TCGv_tl addr;
2990 
2991     nullify_over(ctx);
2992 
2993     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2994              ctx->mmu_idx == MMU_PHYS_IDX);
2995     val = load_gpr(ctx, a->r);
2996     if (a->a) {
2997         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2998             gen_helper_stby_e_parallel(cpu_env, addr, val);
2999         } else {
3000             gen_helper_stby_e(cpu_env, addr, val);
3001         }
3002     } else {
3003         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3004             gen_helper_stby_b_parallel(cpu_env, addr, val);
3005         } else {
3006             gen_helper_stby_b(cpu_env, addr, val);
3007         }
3008     }
3009     if (a->m) {
3010         tcg_gen_andi_reg(ofs, ofs, ~3);
3011         save_gpr(ctx, a->b, ofs);
3012     }
3013 
3014     return nullify_end(ctx);
3015 }
3016 
3017 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3018 {
3019     int hold_mmu_idx = ctx->mmu_idx;
3020 
3021     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3022     ctx->mmu_idx = MMU_PHYS_IDX;
3023     trans_ld(ctx, a);
3024     ctx->mmu_idx = hold_mmu_idx;
3025     return true;
3026 }
3027 
3028 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3029 {
3030     int hold_mmu_idx = ctx->mmu_idx;
3031 
3032     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3033     ctx->mmu_idx = MMU_PHYS_IDX;
3034     trans_st(ctx, a);
3035     ctx->mmu_idx = hold_mmu_idx;
3036     return true;
3037 }
3038 
3039 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3040 {
3041     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3042 
3043     tcg_gen_movi_reg(tcg_rt, a->i);
3044     save_gpr(ctx, a->t, tcg_rt);
3045     cond_free(&ctx->null_cond);
3046     return true;
3047 }
3048 
3049 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3050 {
3051     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
3052     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3053 
3054     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
3055     save_gpr(ctx, 1, tcg_r1);
3056     cond_free(&ctx->null_cond);
3057     return true;
3058 }
3059 
3060 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3061 {
3062     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
3063 
3064     /* Special case rb == 0, for the LDI pseudo-op.
3065        The COPY pseudo-op is handled for free within tcg_gen_addi_reg.  */
3066     if (a->b == 0) {
3067         tcg_gen_movi_reg(tcg_rt, a->i);
3068     } else {
3069         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
3070     }
3071     save_gpr(ctx, a->t, tcg_rt);
3072     cond_free(&ctx->null_cond);
3073     return true;
3074 }
3075 
3076 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3077                     unsigned c, unsigned f, unsigned n, int disp)
3078 {
3079     TCGv_reg dest, in2, sv;
3080     DisasCond cond;
3081 
3082     in2 = load_gpr(ctx, r);
3083     dest = get_temp(ctx);
3084 
3085     tcg_gen_sub_reg(dest, in1, in2);
3086 
3087     sv = NULL;
3088     if (cond_need_sv(c)) {
3089         sv = do_sub_sv(ctx, dest, in1, in2);
3090     }
3091 
3092     cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3093     return do_cbranch(ctx, disp, n, &cond);
3094 }
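
/* The c * 2 + f packing reconstructs the architectural condition
   field: C selects the relation and the F bit negates it, so a
   relation and its complement (e.g. = and <>) share one C value.  */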
3095 
3096 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3097 {
3098     nullify_over(ctx);
3099     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3100 }
3101 
3102 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3103 {
3104     nullify_over(ctx);
3105     return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3106 }
3107 
3108 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3109                     unsigned c, unsigned f, unsigned n, int disp)
3110 {
3111     TCGv_reg dest, in2, sv, cb_msb;
3112     DisasCond cond;
3113 
3114     in2 = load_gpr(ctx, r);
3115     dest = tcg_temp_new();
3116     sv = NULL;
3117     cb_msb = NULL;
3118 
3119     if (cond_need_cb(c)) {
3120         cb_msb = get_temp(ctx);
3121         tcg_gen_movi_reg(cb_msb, 0);
3122         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3123     } else {
3124         tcg_gen_add_reg(dest, in1, in2);
3125     }
3126     if (cond_need_sv(c)) {
3127         sv = do_add_sv(ctx, dest, in1, in2);
3128     }
3129 
3130     cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3131     save_gpr(ctx, r, dest);
3132     tcg_temp_free(dest);
3133     return do_cbranch(ctx, disp, n, &cond);
3134 }
3135 
3136 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3137 {
3138     nullify_over(ctx);
3139     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3140 }
3141 
3142 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3143 {
3144     nullify_over(ctx);
3145     return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3146 }
3147 
3148 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3149 {
3150     TCGv_reg tmp, tcg_r;
3151     DisasCond cond;
3152 
3153     nullify_over(ctx);
3154 
3155     tmp = tcg_temp_new();
3156     tcg_r = load_gpr(ctx, a->r);
3157     tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3158 
3159     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3160     tcg_temp_free(tmp);
3161     return do_cbranch(ctx, a->disp, a->n, &cond);
3162 }
3163 
3164 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3165 {
3166     TCGv_reg tmp, tcg_r;
3167     DisasCond cond;
3168 
3169     nullify_over(ctx);
3170 
3171     tmp = tcg_temp_new();
3172     tcg_r = load_gpr(ctx, a->r);
3173     tcg_gen_shli_reg(tmp, tcg_r, a->p);
3174 
3175     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3176     tcg_temp_free(tmp);
3177     return do_cbranch(ctx, a->disp, a->n, &cond);
3178 }
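
/* Note on both BB forms above: PA bit numbering is big-endian with
   bit 0 as the msb, so shifting left by the bit number (or by SAR)
   moves the selected bit into the sign position, and the branch
   reduces to a signed LT/GE test against zero.  Testing bit 0 is
   thus a shift by 0 and a plain sign test.  */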
3179 
3180 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3181 {
3182     TCGv_reg dest;
3183     DisasCond cond;
3184 
3185     nullify_over(ctx);
3186 
3187     dest = dest_gpr(ctx, a->r2);
3188     if (a->r1 == 0) {
3189         tcg_gen_movi_reg(dest, 0);
3190     } else {
3191         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3192     }
3193 
3194     cond = do_sed_cond(a->c, dest);
3195     return do_cbranch(ctx, a->disp, a->n, &cond);
3196 }
3197 
3198 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3199 {
3200     TCGv_reg dest;
3201     DisasCond cond;
3202 
3203     nullify_over(ctx);
3204 
3205     dest = dest_gpr(ctx, a->r);
3206     tcg_gen_movi_reg(dest, a->i);
3207 
3208     cond = do_sed_cond(a->c, dest);
3209     return do_cbranch(ctx, a->disp, a->n, &cond);
3210 }
3211 
3212 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3213 {
3214     TCGv_reg dest;
3215 
3216     if (a->c) {
3217         nullify_over(ctx);
3218     }
3219 
3220     dest = dest_gpr(ctx, a->t);
3221     if (a->r1 == 0) {
3222         tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3223         tcg_gen_shr_reg(dest, dest, cpu_sar);
3224     } else if (a->r1 == a->r2) {
3225         TCGv_i32 t32 = tcg_temp_new_i32();
3226         tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3227         tcg_gen_rotr_i32(t32, t32, cpu_sar);
3228         tcg_gen_extu_i32_reg(dest, t32);
3229         tcg_temp_free_i32(t32);
3230     } else {
3231         TCGv_i64 t = tcg_temp_new_i64();
3232         TCGv_i64 s = tcg_temp_new_i64();
3233 
3234         tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3235         tcg_gen_extu_reg_i64(s, cpu_sar);
3236         tcg_gen_shr_i64(t, t, s);
3237         tcg_gen_trunc_i64_reg(dest, t);
3238 
3239         tcg_temp_free_i64(t);
3240         tcg_temp_free_i64(s);
3241     }
3242     save_gpr(ctx, a->t, dest);
3243 
3244     /* Install the new nullification.  */
3245     cond_free(&ctx->null_cond);
3246     if (a->c) {
3247         ctx->null_cond = do_sed_cond(a->c, dest);
3248     }
3249     return nullify_end(ctx);
3250 }
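
/* Illustration: SHRPW extracts 32 bits from the 64-bit pair R1:R2.
   With r1 = 0xaaaaaaaa, r2 = 0x55555555 and a shift of 4, the
   concatenated value 0xaaaaaaaa55555555 >> 4 has low half
   0xa5555555.  The special cases above reduce r1 == r2 to a 32-bit
   rotate and r1 == 0 to a plain shift.  */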
3251 
3252 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3253 {
3254     unsigned sa = 31 - a->cpos;
3255     TCGv_reg dest, t2;
3256 
3257     if (a->c) {
3258         nullify_over(ctx);
3259     }
3260 
3261     dest = dest_gpr(ctx, a->t);
3262     t2 = load_gpr(ctx, a->r2);
3263     if (a->r1 == a->r2) {
3264         TCGv_i32 t32 = tcg_temp_new_i32();
3265         tcg_gen_trunc_reg_i32(t32, t2);
3266         tcg_gen_rotri_i32(t32, t32, sa);
3267         tcg_gen_extu_i32_reg(dest, t32);
3268         tcg_temp_free_i32(t32);
3269     } else if (a->r1 == 0) {
3270         tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3271     } else {
3272         TCGv_reg t0 = tcg_temp_new();
3273         tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3274         tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
3275         tcg_temp_free(t0);
3276     }
3277     save_gpr(ctx, a->t, dest);
3278 
3279     /* Install the new nullification.  */
3280     cond_free(&ctx->null_cond);
3281     if (a->c) {
3282         ctx->null_cond = do_sed_cond(a->c, dest);
3283     }
3284     return nullify_end(ctx);
3285 }
3286 
3287 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3288 {
3289     unsigned len = 32 - a->clen;
3290     TCGv_reg dest, src, tmp;
3291 
3292     if (a->c) {
3293         nullify_over(ctx);
3294     }
3295 
3296     dest = dest_gpr(ctx, a->t);
3297     src = load_gpr(ctx, a->r);
3298     tmp = tcg_temp_new();
3299 
3300     /* Recall that SAR is using big-endian bit numbering.  */
3301     tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3302     if (a->se) {
3303         tcg_gen_sar_reg(dest, src, tmp);
3304         tcg_gen_sextract_reg(dest, dest, 0, len);
3305     } else {
3306         tcg_gen_shr_reg(dest, src, tmp);
3307         tcg_gen_extract_reg(dest, dest, 0, len);
3308     }
3309     tcg_temp_free(tmp);
3310     save_gpr(ctx, a->t, dest);
3311 
3312     /* Install the new nullification.  */
3313     cond_free(&ctx->null_cond);
3314     if (a->c) {
3315         ctx->null_cond = do_sed_cond(a->c, dest);
3316     }
3317     return nullify_end(ctx);
3318 }
3319 
3320 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3321 {
3322     unsigned len = 32 - a->clen;
3323     unsigned cpos = 31 - a->pos;
3324     TCGv_reg dest, src;
3325 
3326     if (a->c) {
3327         nullify_over(ctx);
3328     }
3329 
3330     dest = dest_gpr(ctx, a->t);
3331     src = load_gpr(ctx, a->r);
3332     if (a->se) {
3333         tcg_gen_sextract_reg(dest, src, cpos, len);
3334     } else {
3335         tcg_gen_extract_reg(dest, src, cpos, len);
3336     }
3337     save_gpr(ctx, a->t, dest);
3338 
3339     /* Install the new nullification.  */
3340     cond_free(&ctx->null_cond);
3341     if (a->c) {
3342         ctx->null_cond = do_sed_cond(a->c, dest);
3343     }
3344     return nullify_end(ctx);
3345 }
3346 
3347 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3348 {
3349     unsigned len = 32 - a->clen;
3350     target_sreg mask0, mask1;
3351     TCGv_reg dest;
3352 
3353     if (a->c) {
3354         nullify_over(ctx);
3355     }
3356     if (a->cpos + len > 32) {
3357         len = 32 - a->cpos;
3358     }
3359 
3360     dest = dest_gpr(ctx, a->t);
3361     mask0 = deposit64(0, a->cpos, len, a->i);
3362     mask1 = deposit64(-1, a->cpos, len, a->i);
3363 
3364     if (a->nz) {
3365         TCGv_reg src = load_gpr(ctx, a->t);
3366         if (mask1 != -1) {
3367             tcg_gen_andi_reg(dest, src, mask1);
3368             src = dest;
3369         }
3370         tcg_gen_ori_reg(dest, src, mask0);
3371     } else {
3372         tcg_gen_movi_reg(dest, mask0);
3373     }
3374     save_gpr(ctx, a->t, dest);
3375 
3376     /* Install the new nullification.  */
3377     cond_free(&ctx->null_cond);
3378     if (a->c) {
3379         ctx->null_cond = do_sed_cond(a->c, dest);
3380     }
3381     return nullify_end(ctx);
3382 }
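
/* Illustration of the mask construction: depositing the immediate 3
   into a 5-bit field at cpos 8 gives mask0 = 0x00000300 (the field's
   one bits) and mask1 = 0xffffe3ff (all bits outside the field, plus
   those ones).  The ,nz form computes (src & mask1) | mask0; the
   zeroing form is simply the constant mask0.  */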
3383 
3384 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3385 {
3386     unsigned rs = a->nz ? a->t : 0;
3387     unsigned len = 32 - a->clen;
3388     TCGv_reg dest, val;
3389 
3390     if (a->c) {
3391         nullify_over(ctx);
3392     }
3393     if (a->cpos + len > 32) {
3394         len = 32 - a->cpos;
3395     }
3396 
3397     dest = dest_gpr(ctx, a->t);
3398     val = load_gpr(ctx, a->r);
3399     if (rs == 0) {
3400         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3401     } else {
3402         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3403     }
3404     save_gpr(ctx, a->t, dest);
3405 
3406     /* Install the new nullification.  */
3407     cond_free(&ctx->null_cond);
3408     if (a->c) {
3409         ctx->null_cond = do_sed_cond(a->c, dest);
3410     }
3411     return nullify_end(ctx);
3412 }
3413 
3414 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3415                         unsigned nz, unsigned clen, TCGv_reg val)
3416 {
3417     unsigned rs = nz ? rt : 0;
3418     unsigned len = 32 - clen;
3419     TCGv_reg mask, tmp, shift, dest;
3420     unsigned msb = 1U << (len - 1);
3421 
3422     dest = dest_gpr(ctx, rt);
3423     shift = tcg_temp_new();
3424     tmp = tcg_temp_new();
3425 
3426     /* Convert big-endian bit numbering in SAR to left-shift.  */
3427     tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3428 
3429     mask = tcg_const_reg(msb + (msb - 1));
3430     tcg_gen_and_reg(tmp, val, mask);
3431     if (rs) {
3432         tcg_gen_shl_reg(mask, mask, shift);
3433         tcg_gen_shl_reg(tmp, tmp, shift);
3434         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3435         tcg_gen_or_reg(dest, dest, tmp);
3436     } else {
3437         tcg_gen_shl_reg(dest, tmp, shift);
3438     }
3439     tcg_temp_free(shift);
3440     tcg_temp_free(mask);
3441     tcg_temp_free(tmp);
3442     save_gpr(ctx, rt, dest);
3443 
3444     /* Install the new nullification.  */
3445     cond_free(&ctx->null_cond);
3446     if (c) {
3447         ctx->null_cond = do_sed_cond(c, dest);
3448     }
3449     return nullify_end(ctx);
3450 }
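
/* Data-flow sketch for the rs != 0 case, with len = 8 and a
   SAR-derived left shift of 4:
       mask = 0x000000ff << 4 = 0x00000ff0
       tmp  = (val & 0x000000ff) << 4
       dest = (gr[rs] & ~0x00000ff0) | tmp
   i.e. an 8-bit field of VAL replaces bits 4..11 of the target.  */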
3451 
3452 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3453 {
3454     if (a->c) {
3455         nullify_over(ctx);
3456     }
3457     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3458 }
3459 
3460 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3461 {
3462     if (a->c) {
3463         nullify_over(ctx);
3464     }
3465     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3466 }
3467 
3468 static bool trans_be(DisasContext *ctx, arg_be *a)
3469 {
3470     TCGv_reg tmp;
3471 
3472 #ifdef CONFIG_USER_ONLY
3473     /* ??? It seems like there should be a good way of using
3474        "be disp(sr2, r0)", the canonical gateway entry mechanism
3475       manage alongside branch delay slots.  Therefore we handle
3476        manage along side branch delay slots.  Therefore we handle
3477        entry into the gateway page via absolute address.  */
3478     /* Since we don't implement spaces, just branch.  Do notice the special
3479        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3480        goto_tb to the TB containing the syscall.  */
3481     if (a->b == 0) {
3482         return do_dbranch(ctx, a->disp, a->l, a->n);
3483     }
3484 #else
3485     nullify_over(ctx);
3486 #endif
3487 
3488     tmp = get_temp(ctx);
3489     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3490     tmp = do_ibranch_priv(ctx, tmp);
3491 
3492 #ifdef CONFIG_USER_ONLY
3493     return do_ibranch(ctx, tmp, a->l, a->n);
3494 #else
3495     TCGv_i64 new_spc = tcg_temp_new_i64();
3496 
3497     load_spr(ctx, new_spc, a->sp);
3498     if (a->l) {
3499         copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3500         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3501     }
3502     if (a->n && use_nullify_skip(ctx)) {
3503         tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3504         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3505         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3506         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3507     } else {
3508         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3509         if (ctx->iaoq_b == -1) {
3510             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3511         }
3512         tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3513         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3514         nullify_set(ctx, a->n);
3515     }
3516     tcg_temp_free_i64(new_spc);
3517     tcg_gen_lookup_and_goto_ptr();
3518     ctx->base.is_jmp = DISAS_NORETURN;
3519     return nullify_end(ctx);
3520 #endif
3521 }
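
/*
 * Note the asymmetry above: when the delay slot is known to be
 * nullified (use_nullify_skip), both queue entries and both spaces can
 * be set to the branch target immediately; otherwise only the back of
 * the queue receives the new offset and space, and the front advances
 * from the old back entry as usual.
 */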
3522 
3523 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3524 {
3525     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3526 }
3527 
3528 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3529 {
3530     target_ureg dest = iaoq_dest(ctx, a->disp);
3531 
3532     nullify_over(ctx);
3533 
3534     /* Make sure the caller hasn't done something weird with the queue.
3535      * ??? This is not quite the same as the PSW[B] bit, which would be
3536      * expensive to track.  Real hardware will trap for
3537      *    b  gateway
3538      *    b  gateway+4  (in delay slot of first branch)
3539      * However, checking for a non-sequential instruction queue *will*
3540      * diagnose the security hole
3541      *    b  gateway
3542      *    b  evil
3543      * in which instructions at evil would run with increased privs.
3544      */
3545     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3546         return gen_illegal(ctx);
3547     }
3548 
3549 #ifndef CONFIG_USER_ONLY
3550     if (ctx->tb_flags & PSW_C) {
3551         CPUHPPAState *env = ctx->cs->env_ptr;
3552         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3553         /* If we could not find a TLB entry, then we need to generate an
3554            ITLB miss exception so the kernel will provide it.
3555            The resulting TLB fill operation will invalidate this TB and
3556            we will re-translate, at which point we *will* be able to find
3557            the TLB entry and determine if this is in fact a gateway page.  */
3558         if (type < 0) {
3559             gen_excp(ctx, EXCP_ITLB_MISS);
3560             return true;
3561         }
3562         /* No change for non-gateway pages or for priv decrease.  */
3563         if (type >= 4 && type - 4 < ctx->privilege) {
3564             dest = deposit32(dest, 0, 2, type - 4);
3565         }
3566     } else {
3567         dest &= -4;  /* priv = 0 */
3568     }
3569 #endif
3570 
3571     if (a->l) {
3572         TCGv_reg tmp = dest_gpr(ctx, a->l);
3573         if (ctx->privilege < 3) {
3574             tcg_gen_andi_reg(tmp, tmp, -4);
3575         }
3576         tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3577         save_gpr(ctx, a->l, tmp);
3578     }
3579 
3580     return do_dbranch(ctx, dest, 0, a->n);
3581 }
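
/*
 * Example of the promotion above: b,gate onto a type-4 gateway page
 * with ctx->privilege = 3 rewrites the two low bits of dest with
 * type - 4 = 0, so the target executes at the most-privileged level.
 * Access-rights types 4..7 thus encode the privilege that the gateway
 * confers, while a would-be privilege decrease leaves dest unchanged.
 */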
3582 
3583 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3584 {
3585     if (a->x) {
3586         TCGv_reg tmp = get_temp(ctx);
3587         tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3588         tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3589         /* The computation here never changes privilege level.  */
3590         return do_ibranch(ctx, tmp, a->l, a->n);
3591     } else {
3592         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3593         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3594     }
3595 }
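
/*
 * E.g. "blr r5,r0" with r5 = n branches to iaoq_f + 8 + 8 * n: each
 * branch-table entry is 8 bytes, i.e. one instruction plus its delay
 * slot, hence the shift by 3.
 */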
3596 
3597 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3598 {
3599     TCGv_reg dest;
3600 
3601     if (a->x == 0) {
3602         dest = load_gpr(ctx, a->b);
3603     } else {
3604         dest = get_temp(ctx);
3605         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3606         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3607     }
3608     dest = do_ibranch_priv(ctx, dest);
3609     return do_ibranch(ctx, dest, 0, a->n);
3610 }
3611 
3612 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3613 {
3614     TCGv_reg dest;
3615 
3616 #ifdef CONFIG_USER_ONLY
3617     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3618     return do_ibranch(ctx, dest, a->l, a->n);
3619 #else
3620     nullify_over(ctx);
3621     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3622 
3623     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3624     if (ctx->iaoq_b == -1) {
3625         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3626     }
3627     copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3628     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3629     if (a->l) {
3630         copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3631     }
3632     nullify_set(ctx, a->n);
3633     tcg_gen_lookup_and_goto_ptr();
3634     ctx->base.is_jmp = DISAS_NORETURN;
3635     return nullify_end(ctx);
3636 #endif
3637 }
3638 
3639 /*
3640  * Float class 0
3641  */
3642 
3643 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3644 {
3645     tcg_gen_mov_i32(dst, src);
3646 }
3647 
3648 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3649 {
3650     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3651 }
3652 
3653 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3654 {
3655     tcg_gen_mov_i64(dst, src);
3656 }
3657 
3658 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3659 {
3660     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3661 }
3662 
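/*
 * fabs, fneg and fnegabs below are pure sign-bit manipulations of the
 * IEEE encoding: AND with ~MSB clears the sign, XOR with MSB flips it,
 * and OR with MSB sets it, so no helper (and no FP exception) is
 * involved.
 */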
3663 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3664 {
3665     tcg_gen_andi_i32(dst, src, INT32_MAX);
3666 }
3667 
3668 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3669 {
3670     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3671 }
3672 
3673 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3674 {
3675     tcg_gen_andi_i64(dst, src, INT64_MAX);
3676 }
3677 
3678 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3679 {
3680     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3681 }
3682 
3683 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3684 {
3685     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3686 }
3687 
3688 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3689 {
3690     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3691 }
3692 
3693 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3694 {
3695     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3696 }
3697 
3698 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3699 {
3700     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3701 }
3702 
3703 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3704 {
3705     tcg_gen_xori_i32(dst, src, INT32_MIN);
3706 }
3707 
3708 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3709 {
3710     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3711 }
3712 
3713 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3714 {
3715     tcg_gen_xori_i64(dst, src, INT64_MIN);
3716 }
3717 
3718 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3719 {
3720     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3721 }
3722 
3723 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3724 {
3725     tcg_gen_ori_i32(dst, src, INT32_MIN);
3726 }
3727 
3728 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3729 {
3730     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3731 }
3732 
3733 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3734 {
3735     tcg_gen_ori_i64(dst, src, INT64_MIN);
3736 }
3737 
3738 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3739 {
3740     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3741 }
3742 
3743 /*
3744  * Float class 1
3745  */
3746 
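/*
 * Naming key for the conversions below: "f"/"d" in the trans_ names
 * are single/double precision floats, "w"/"q" are 32-bit/64-bit
 * integers (the helpers spell these "s", "d", "w", "dw"), a "u"
 * prefix marks the integer as unsigned, and "t" selects the
 * truncating (round-toward-zero) variant.
 */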
3747 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3748 {
3749     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3750 }
3751 
3752 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3753 {
3754     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3755 }
3756 
3757 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3758 {
3759     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3760 }
3761 
3762 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3763 {
3764     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3765 }
3766 
3767 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3768 {
3769     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3770 }
3771 
3772 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3773 {
3774     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3775 }
3776 
3777 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3778 {
3779     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3780 }
3781 
3782 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3783 {
3784     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3785 }
3786 
3787 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3788 {
3789     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3790 }
3791 
3792 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3793 {
3794     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3795 }
3796 
3797 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3798 {
3799     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3800 }
3801 
3802 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3803 {
3804     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3805 }
3806 
3807 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3808 {
3809     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3810 }
3811 
3812 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3813 {
3814     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3815 }
3816 
3817 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3818 {
3819     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3820 }
3821 
3822 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3823 {
3824     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3825 }
3826 
3827 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3828 {
3829     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3830 }
3831 
3832 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3833 {
3834     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3835 }
3836 
3837 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3838 {
3839     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3840 }
3841 
3842 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3843 {
3844     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3845 }
3846 
3847 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3848 {
3849     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3850 }
3851 
3852 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3853 {
3854     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3855 }
3856 
3857 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3858 {
3859     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3860 }
3861 
3862 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3863 {
3864     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3865 }
3866 
3867 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3868 {
3869     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3870 }
3871 
3872 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3873 {
3874     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3875 }
3876 
3877 /*
3878  * Float class 2
3879  */
3880 
3881 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3882 {
3883     TCGv_i32 ta, tb, tc, ty;
3884 
3885     nullify_over(ctx);
3886 
3887     ta = load_frw0_i32(a->r1);
3888     tb = load_frw0_i32(a->r2);
3889     ty = tcg_const_i32(a->y);
3890     tc = tcg_const_i32(a->c);
3891 
3892     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3893 
3894     tcg_temp_free_i32(ta);
3895     tcg_temp_free_i32(tb);
3896     tcg_temp_free_i32(ty);
3897     tcg_temp_free_i32(tc);
3898 
3899     return nullify_end(ctx);
3900 }
3901 
3902 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3903 {
3904     TCGv_i64 ta, tb;
3905     TCGv_i32 tc, ty;
3906 
3907     nullify_over(ctx);
3908 
3909     ta = load_frd0(a->r1);
3910     tb = load_frd0(a->r2);
3911     ty = tcg_const_i32(a->y);
3912     tc = tcg_const_i32(a->c);
3913 
3914     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3915 
3916     tcg_temp_free_i64(ta);
3917     tcg_temp_free_i64(tb);
3918     tcg_temp_free_i32(ty);
3919     tcg_temp_free_i32(tc);
3920 
3921     return nullify_end(ctx);
3922 }
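
/*
 * Neither fcmp helper returns a value to TCG: the comparison result is
 * recorded in the shadowed status register (fr0_shadow), which the
 * ftest translation below reads to decide nullification.
 */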
3923 
3924 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3925 {
3926     TCGv_reg t;
3927 
3928     nullify_over(ctx);
3929 
3930     t = get_temp(ctx);
3931     tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3932 
3933     if (a->y == 1) {
3934         int mask;
3935         bool inv = false;
3936 
3937         switch (a->c) {
3938         case 0: /* simple */
3939             tcg_gen_andi_reg(t, t, 0x4000000);
3940             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3941             goto done;
3942         case 2: /* rej */
3943             inv = true;
3944             /* fallthru */
3945         case 1: /* acc */
3946             mask = 0x43ff800;
3947             break;
3948         case 6: /* rej8 */
3949             inv = true;
3950             /* fallthru */
3951         case 5: /* acc8 */
3952             mask = 0x43f8000;
3953             break;
3954         case 9: /* acc6 */
3955             mask = 0x43e0000;
3956             break;
3957         case 13: /* acc4 */
3958             mask = 0x4380000;
3959             break;
3960         case 17: /* acc2 */
3961             mask = 0x4200000;
3962             break;
3963         default:
3964             gen_illegal(ctx);
3965             return true;
3966         }
3967         if (inv) {
3968             TCGv_reg c = load_const(ctx, mask);
3969             tcg_gen_or_reg(t, t, c);
3970             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3971         } else {
3972             tcg_gen_andi_reg(t, t, mask);
3973             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3974         }
3975     } else {
3976         unsigned cbit = (a->y ^ 1) - 1;
3977 
3978         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3979         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3980         /* "t" came from get_temp; the translator loop will free it.  */
3981     }
3982 
3983  done:
3984     return nullify_end(ctx);
3985 }
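
/*
 * Sketch of the y == 1 decoding above: c = 0 tests only the C bit
 * (0x4000000) of the shadow status word, while the acc/rej variants
 * also cover progressively fewer of the compare-queue bits (masks
 * 0x43ff800 down to 0x4200000), with the "rej" forms inverting the
 * sense of the test.
 */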
3986 
3987 /*
3988  * Float class 3
3989  */
3990 
3991 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3992 {
3993     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3994 }
3995 
3996 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3997 {
3998     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3999 }
4000 
4001 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4002 {
4003     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4004 }
4005 
4006 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4007 {
4008     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4009 }
4010 
4011 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4012 {
4013     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4014 }
4015 
4016 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4017 {
4018     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4019 }
4020 
4021 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4022 {
4023     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4024 }
4025 
4026 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4027 {
4028     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4029 }
4030 
4031 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4032 {
4033     TCGv_i64 x, y;
4034 
4035     nullify_over(ctx);
4036 
4037     x = load_frw0_i64(a->r1);
4038     y = load_frw0_i64(a->r2);
4039     tcg_gen_mul_i64(x, x, y);
4040     save_frd(a->t, x);
4041     tcg_temp_free_i64(x);
4042     tcg_temp_free_i64(y);
4043 
4044     return nullify_end(ctx);
4045 }
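
/*
 * xmpyu is an unsigned 32 x 32 -> 64-bit multiply: assuming
 * load_frw0_i64 zero-extends the 32-bit operand into an i64, the
 * plain 64-bit multiply below produces the full unsigned product,
 * which save_frd stores as one double-width FP register.
 */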
4046 
4047 /* Convert the fmpyadd single-precision register encodings to standard.  */
4048 static inline int fmpyadd_s_reg(unsigned r)
4049 {
4050     return (r & 16) * 2 + 16 + (r & 15);
4051 }
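
/*
 * E.g. r = 5 maps to 16 + 5 = 21 and r = 21 maps to 32 + 16 + 5 = 53:
 * the encoding addresses fr16..fr31, with bit 4 selecting the right
 * half-register (indices 48..63 in this translator's numbering).
 */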
4052 
4053 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4054 {
4055     int tm = fmpyadd_s_reg(a->tm);
4056     int ra = fmpyadd_s_reg(a->ra);
4057     int ta = fmpyadd_s_reg(a->ta);
4058     int rm2 = fmpyadd_s_reg(a->rm2);
4059     int rm1 = fmpyadd_s_reg(a->rm1);
4060 
4061     nullify_over(ctx);
4062 
4063     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4064     do_fop_weww(ctx, ta, ta, ra,
4065                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4066 
4067     return nullify_end(ctx);
4068 }
4069 
4070 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4071 {
4072     return do_fmpyadd_s(ctx, a, false);
4073 }
4074 
4075 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4076 {
4077     return do_fmpyadd_s(ctx, a, true);
4078 }
4079 
4080 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4081 {
4082     nullify_over(ctx);
4083 
4084     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4085     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4086                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4087 
4088     return nullify_end(ctx);
4089 }
4090 
4091 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4092 {
4093     return do_fmpyadd_d(ctx, a, false);
4094 }
4095 
4096 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4097 {
4098     return do_fmpyadd_d(ctx, a, true);
4099 }
4100 
4101 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4102 {
4103     TCGv_i32 x, y, z;
4104 
4105     nullify_over(ctx);
4106     x = load_frw0_i32(a->rm1);
4107     y = load_frw0_i32(a->rm2);
4108     z = load_frw0_i32(a->ra3);
4109 
4110     if (a->neg) {
4111         gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4112     } else {
4113         gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4114     }
4115 
4116     tcg_temp_free_i32(y);
4117     tcg_temp_free_i32(z);
4118     save_frw_i32(a->t, x);
4119     tcg_temp_free_i32(x);
4120     return nullify_end(ctx);
4121 }
4122 
4123 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4124 {
4125     TCGv_i64 x, y, z;
4126 
4127     nullify_over(ctx);
4128     x = load_frd0(a->rm1);
4129     y = load_frd0(a->rm2);
4130     z = load_frd0(a->ra3);
4131 
4132     if (a->neg) {
4133         gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4134     } else {
4135         gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4136     }
4137 
4138     tcg_temp_free_i64(y);
4139     tcg_temp_free_i64(z);
4140     save_frd(a->t, x);
4141     tcg_temp_free_i64(x);
4142     return nullify_end(ctx);
4143 }
4144 
4145 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4146 {
4147     qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4148     cond_free(&ctx->null_cond);
4149     return true;
4150 }
4151 
4152 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4153 {
4154     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4155     int bound;
4156 
4157     ctx->cs = cs;
4158     ctx->tb_flags = ctx->base.tb->flags;
4159 
4160 #ifdef CONFIG_USER_ONLY
4161     ctx->privilege = MMU_USER_IDX;
4162     ctx->mmu_idx = MMU_USER_IDX;
4163     ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4164     ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4165 #else
4166     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4167     ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4168 
4169     /* Recover the IAOQ values from the GVA + PRIV.  */
4170     uint64_t cs_base = ctx->base.tb->cs_base;
4171     uint64_t iasq_f = cs_base & ~0xffffffffull;
4172     int32_t diff = cs_base;
4173 
4174     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4175     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
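
    /*
     * That is, cs_base packs the front space id into its high bits and
     * a signed iaoq_b - iaoq_f displacement into its low 32 bits, with
     * a zero displacement standing for "back queue unknown" (-1); the
     * privilege level re-enters through the low bits of the recovered
     * offsets.
     */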
4176 #endif
4177     ctx->iaoq_n = -1;
4178     ctx->iaoq_n_var = NULL;
4179 
4180     /* Bound the number of instructions by those left on the page.  */
4181     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
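    /* E.g. 8 bytes before a page boundary, (pc_first | TARGET_PAGE_MASK)
       is -8, giving a bound of 2 instructions.  */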
4182     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4183 
4184     ctx->ntempr = 0;
4185     ctx->ntempl = 0;
4186     memset(ctx->tempr, 0, sizeof(ctx->tempr));
4187     memset(ctx->templ, 0, sizeof(ctx->templ));
4188 }
4189 
4190 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4191 {
4192     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4193 
4194     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4195     ctx->null_cond = cond_make_f();
4196     ctx->psw_n_nonzero = false;
4197     if (ctx->tb_flags & PSW_N) {
4198         ctx->null_cond.c = TCG_COND_ALWAYS;
4199         ctx->psw_n_nonzero = true;
4200     }
4201     ctx->null_lab = NULL;
4202 }
4203 
4204 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4205 {
4206     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4207 
4208     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4209 }
4210 
4211 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
4212                                       const CPUBreakpoint *bp)
4213 {
4214     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4215 
4216     gen_excp(ctx, EXCP_DEBUG);
4217     ctx->base.pc_next += 4;
4218     return true;
4219 }
4220 
4221 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4222 {
4223     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4224     CPUHPPAState *env = cs->env_ptr;
4225     DisasJumpType ret;
4226     int i, n;
4227 
4228     /* Execute one insn.  */
4229 #ifdef CONFIG_USER_ONLY
4230     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4231         do_page_zero(ctx);
4232         ret = ctx->base.is_jmp;
4233         assert(ret != DISAS_NEXT);
4234     } else
4235 #endif
4236     {
4237         /* Always fetch the insn, even if nullified, so that we check
4238            the page permissions for execute.  */
4239         uint32_t insn = translator_ldl(env, ctx->base.pc_next);
4240 
4241         /* Set up the IA queue for the next insn.
4242            This will be overwritten by a branch.  */
4243         if (ctx->iaoq_b == -1) {
4244             ctx->iaoq_n = -1;
4245             ctx->iaoq_n_var = get_temp(ctx);
4246             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4247         } else {
4248             ctx->iaoq_n = ctx->iaoq_b + 4;
4249             ctx->iaoq_n_var = NULL;
4250         }
4251 
4252         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4253             ctx->null_cond.c = TCG_COND_NEVER;
4254             ret = DISAS_NEXT;
4255         } else {
4256             ctx->insn = insn;
4257             if (!decode(ctx, insn)) {
4258                 gen_illegal(ctx);
4259             }
4260             ret = ctx->base.is_jmp;
4261             assert(ctx->null_lab == NULL);
4262         }
4263     }
4264 
4265     /* Free any temporaries allocated.  */
4266     for (i = 0, n = ctx->ntempr; i < n; ++i) {
4267         tcg_temp_free(ctx->tempr[i]);
4268         ctx->tempr[i] = NULL;
4269     }
4270     for (i = 0, n = ctx->ntempl; i < n; ++i) {
4271         tcg_temp_free_tl(ctx->templ[i]);
4272         ctx->templ[i] = NULL;
4273     }
4274     ctx->ntempr = 0;
4275     ctx->ntempl = 0;
4276 
4277     /* Advance the insn queue.  Note that this check also detects
4278        a privilege change within the instruction queue.  */
4279     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4280         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4281             && use_goto_tb(ctx, ctx->iaoq_b)
4282             && (ctx->null_cond.c == TCG_COND_NEVER
4283                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4284             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4285             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4286             ctx->base.is_jmp = ret = DISAS_NORETURN;
4287         } else {
4288             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4289         }
4290     }
4291     ctx->iaoq_f = ctx->iaoq_b;
4292     ctx->iaoq_b = ctx->iaoq_n;
4293     ctx->base.pc_next += 4;
4294 
4295     switch (ret) {
4296     case DISAS_NORETURN:
4297     case DISAS_IAQ_N_UPDATED:
4298         break;
4299 
4300     case DISAS_NEXT:
4301     case DISAS_IAQ_N_STALE:
4302     case DISAS_IAQ_N_STALE_EXIT:
4303         if (ctx->iaoq_f == -1) {
4304             tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4305             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4306 #ifndef CONFIG_USER_ONLY
4307             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4308 #endif
4309             nullify_save(ctx);
4310             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4311                                 ? DISAS_EXIT
4312                                 : DISAS_IAQ_N_UPDATED);
4313         } else if (ctx->iaoq_b == -1) {
4314             tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4315         }
4316         break;
4317 
4318     default:
4319         g_assert_not_reached();
4320     }
4321 }
4322 
4323 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4324 {
4325     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4326     DisasJumpType is_jmp = ctx->base.is_jmp;
4327 
4328     switch (is_jmp) {
4329     case DISAS_NORETURN:
4330         break;
4331     case DISAS_TOO_MANY:
4332     case DISAS_IAQ_N_STALE:
4333     case DISAS_IAQ_N_STALE_EXIT:
4334         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4335         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4336         nullify_save(ctx);
4337         /* FALLTHRU */
4338     case DISAS_IAQ_N_UPDATED:
4339         if (ctx->base.singlestep_enabled) {
4340             gen_excp_1(EXCP_DEBUG);
4341         } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4342             tcg_gen_lookup_and_goto_ptr();
4343         }
4344         /* FALLTHRU */
4345     case DISAS_EXIT:
4346         tcg_gen_exit_tb(NULL, 0);
4347         break;
4348     default:
4349         g_assert_not_reached();
4350     }
4351 }
4352 
4353 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4354 {
4355     target_ulong pc = dcbase->pc_first;
4356 
4357 #ifdef CONFIG_USER_ONLY
4358     switch (pc) {
4359     case 0x00:
4360         qemu_log("IN:\n0x00000000:  (null)\n");
4361         return;
4362     case 0xb0:
4363         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4364         return;
4365     case 0xe0:
4366         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4367         return;
4368     case 0x100:
4369         qemu_log("IN:\n0x00000100:  syscall\n");
4370         return;
4371     }
4372 #endif
4373 
4374     qemu_log("IN: %s\n", lookup_symbol(pc));
4375     log_target_disas(cs, pc, dcbase->tb->size);
4376 }
4377 
4378 static const TranslatorOps hppa_tr_ops = {
4379     .init_disas_context = hppa_tr_init_disas_context,
4380     .tb_start           = hppa_tr_tb_start,
4381     .insn_start         = hppa_tr_insn_start,
4382     .breakpoint_check   = hppa_tr_breakpoint_check,
4383     .translate_insn     = hppa_tr_translate_insn,
4384     .tb_stop            = hppa_tr_tb_stop,
4385     .disas_log          = hppa_tr_disas_log,
4386 };
4387 
4388 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
4389 {
4390     DisasContext ctx;
4391     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
4392 }
4393 
4394 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4395                           target_ulong *data)
4396 {
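    /*
     * data[0] and data[1] are the iaoq_f/iaoq_b values recorded by
     * tcg_gen_insn_start in hppa_tr_insn_start; iaoq_b is (target_ureg)-1
     * when the back of the queue was variable at translation time.
     */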
4397     env->iaoq_f = data[0];
4398     if (data[1] != (target_ureg)-1) {
4399         env->iaoq_b = data[1];
4400     }
4401     /* Since we were executing the instruction at IAOQ_F, and took some
4402        sort of action that provoked the cpu_restore_state, we can infer
4403        that the instruction was not nullified.  */
4404     env->psw_n = 0;
4405 }
4406