xref: /openbmc/qemu/target/hppa/translate.c (revision 31274b46)
/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */
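/* For example (illustrative): with TARGET_REGISTER_BITS == 32,
   tcg_gen_add_reg below expands to tcg_gen_add_i32; with 64, to
   tcg_gen_add_i64.  */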
35 
#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_reg_new   tcg_global_reg_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_reg_new   tcg_global_reg_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
250 
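/* A lazily-evaluated condition for nullification.  C compares A0 with
   A1.  a0_is_n notes that a0 aliases the global psw_n and so must not
   be freed; a1_is_0 notes that a1 stands for a constant zero that has
   not yet been materialized (cond_prep below does so on demand).  */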
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;
257 
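/* Per-TB translation state.  iaoq_f/iaoq_b hold the front and back of
   the PA-RISC instruction address offset queue, and iaoq_n the computed
   next offset; -1 in any of these means the value is known only at
   runtime (see copy_iaoq_entry below).  tempr/templ collect the
   temporaries allocated while translating one insn so they can be
   freed together afterward.  */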
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
292 
/* The space register field is stored inverted, so that a negative value
   marks an explicit 3-bit specifier for which 0 means sr0 rather than a
   space inferred from the base register.  */
static int expand_sr3x(int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
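/* Illustratively, from the bit test below: M=0 yields 0 (no base
   update), M=1,A=0 yields 1 (post-modify), and M=1,A=1 yields -1
   (pre-modify); see the do_load_32 comment for the sign convention.  */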
static int ma_to_m(int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);

    return r;
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                           ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
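/* Typical usage, as a sketch:
 *
 *     nullify_over(ctx);
 *     ... emit the insn proper ...
 *     return nullify_end(ctx);
 */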
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                          ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                           ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
695 
/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
744 
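/* Copy the known-constant IVAL into DEST, or the runtime value VVAL
   when IVAL == -1, which is used throughout as an "unknown at
   translation time" sentinel.  */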
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
        || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */
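/* As used below, bits 3:1 of CF select the condition and bit 0 negates
   it; e.g. (illustratively) cf = 3 is c = 1 with the negate bit set,
   testing "<>" as the inverse of "=".  */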
849 
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
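    /* For example (illustrative): orig = 3 maps to c = 7 (OD) with
       f = 0, while orig = 6 maps to c = 2, f = 1, i.e. ">=".  */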
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
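        /* What the next four ops compute, as a sketch: the per-bit
           carry-out of in1 + in2, via the identity
           cb = (in1 & in2) | ((in1 | in2) & ~res).  */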
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
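        /* Worked example (illustrative): for res = 0x12005678, the
           zero byte borrows in the subtraction, leaving 0xff there;
           ANDC with res keeps its top bit, so tmp & 0x80808080 is
           nonzero exactly when some byte of res is zero.  */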
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
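/* A sketch of the identity used: sv = (res ^ in1) & ~(in1 ^ in2) has
   its sign bit set exactly when the inputs share a sign that the
   result does not; do_cond tests it with TCG_COND_LT.  The subtraction
   variant below ANDs with (in1 ^ in2) instead, since overflow there
   requires differing input signs.  */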
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
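        /* That is, as a sketch: {1:in1} - {0:in2} = {carry-out:dest},
           leaving cb_msb with the carry-out of in1 + ~in2 + 1.  */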
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
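/* Illustratively: a field decoded with expand_sr3x above arrives here
   negative, and ~sp recovers the explicit 3-bit specifier, under which
   sp == 0 really does select sr0.  */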
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif
1394 
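/* Form the global virtual address.  This assumes (per the layout used
   elsewhere in this target) that space values are stored pre-shifted
   into the high bits of the i64, so the GVA is the selected space
   OR'd with the (possibly 62-bit masked) offset.  */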
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
1448     if (modify) {
1449         save_gpr(ctx, rb, ofs);
1450     }
1451 }
1452 
1453 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1454                        unsigned rx, int scale, target_sreg disp,
1455                        unsigned sp, int modify, TCGMemOp mop)
1456 {
1457     TCGv_reg ofs;
1458     TCGv_tl addr;
1459 
1460     /* Caller uses nullify_over/nullify_end.  */
1461     assert(ctx->null_cond.c == TCG_COND_NEVER);
1462 
1463     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1464              ctx->mmu_idx == MMU_PHYS_IDX);
1465     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
1466     if (modify) {
1467         save_gpr(ctx, rb, ofs);
1468     }
1469 }
1470 
1471 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1472                         unsigned rx, int scale, target_sreg disp,
1473                         unsigned sp, int modify, TCGMemOp mop)
1474 {
1475     TCGv_reg ofs;
1476     TCGv_tl addr;
1477 
1478     /* Caller uses nullify_over/nullify_end.  */
1479     assert(ctx->null_cond.c == TCG_COND_NEVER);
1480 
1481     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1482              ctx->mmu_idx == MMU_PHYS_IDX);
1483     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
1484     if (modify) {
1485         save_gpr(ctx, rb, ofs);
1486     }
1487 }
1488 
1489 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1490                         unsigned rx, int scale, target_sreg disp,
1491                         unsigned sp, int modify, TCGMemOp mop)
1492 {
1493     TCGv_reg ofs;
1494     TCGv_tl addr;
1495 
1496     /* Caller uses nullify_over/nullify_end.  */
1497     assert(ctx->null_cond.c == TCG_COND_NEVER);
1498 
1499     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1500              ctx->mmu_idx == MMU_PHYS_IDX);
1501     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
1502     if (modify) {
1503         save_gpr(ctx, rb, ofs);
1504     }
1505 }
1506 
1507 #if TARGET_REGISTER_BITS == 64
1508 #define do_load_reg   do_load_64
1509 #define do_store_reg  do_store_64
1510 #else
1511 #define do_load_reg   do_load_32
1512 #define do_store_reg  do_store_32
1513 #endif
1514 
1515 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1516                     unsigned rx, int scale, target_sreg disp,
1517                     unsigned sp, int modify, TCGMemOp mop)
1518 {
1519     TCGv_reg dest;
1520 
1521     nullify_over(ctx);
1522 
1523     if (modify == 0) {
1524         /* No base register update.  */
1525         dest = dest_gpr(ctx, rt);
1526     } else {
1527         /* Make sure if RT == RB, we see the result of the load.  */
1528         dest = get_temp(ctx);
1529     }
1530     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1531     save_gpr(ctx, rt, dest);
1532 
1533     return nullify_end(ctx);
1534 }
1535 
1536 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1537                       unsigned rx, int scale, target_sreg disp,
1538                       unsigned sp, int modify)
1539 {
1540     TCGv_i32 tmp;
1541 
1542     nullify_over(ctx);
1543 
1544     tmp = tcg_temp_new_i32();
1545     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1546     save_frw_i32(rt, tmp);
1547     tcg_temp_free_i32(tmp);
1548 
1549     if (rt == 0) {
1550         gen_helper_loaded_fr0(cpu_env);
1551     }
1552 
1553     return nullify_end(ctx);
1554 }
1555 
1556 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1557 {
1558     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1559                      a->disp, a->sp, a->m);
1560 }
1561 
1562 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1563                       unsigned rx, int scale, target_sreg disp,
1564                       unsigned sp, int modify)
1565 {
1566     TCGv_i64 tmp;
1567 
1568     nullify_over(ctx);
1569 
1570     tmp = tcg_temp_new_i64();
1571     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1572     save_frd(rt, tmp);
1573     tcg_temp_free_i64(tmp);
1574 
1575     if (rt == 0) {
1576         gen_helper_loaded_fr0(cpu_env);
1577     }
1578 
1579     return nullify_end(ctx);
1580 }
1581 
1582 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1583 {
1584     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1585                      a->disp, a->sp, a->m);
1586 }
1587 
1588 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1589                      target_sreg disp, unsigned sp,
1590                      int modify, TCGMemOp mop)
1591 {
1592     nullify_over(ctx);
1593     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1594     return nullify_end(ctx);
1595 }
1596 
1597 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1598                        unsigned rx, int scale, target_sreg disp,
1599                        unsigned sp, int modify)
1600 {
1601     TCGv_i32 tmp;
1602 
1603     nullify_over(ctx);
1604 
1605     tmp = load_frw_i32(rt);
1606     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1607     tcg_temp_free_i32(tmp);
1608 
1609     return nullify_end(ctx);
1610 }
1611 
1612 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1613 {
1614     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1615                       a->disp, a->sp, a->m);
1616 }
1617 
1618 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1619                        unsigned rx, int scale, target_sreg disp,
1620                        unsigned sp, int modify)
1621 {
1622     TCGv_i64 tmp;
1623 
1624     nullify_over(ctx);
1625 
1626     tmp = load_frd(rt);
1627     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1628     tcg_temp_free_i64(tmp);
1629 
1630     return nullify_end(ctx);
1631 }
1632 
1633 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1634 {
1635     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1636                       a->disp, a->sp, a->m);
1637 }
1638 
1639 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1640                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1641 {
1642     TCGv_i32 tmp;
1643 
1644     nullify_over(ctx);
1645     tmp = load_frw0_i32(ra);
1646 
1647     func(tmp, cpu_env, tmp);
1648 
1649     save_frw_i32(rt, tmp);
1650     tcg_temp_free_i32(tmp);
1651     return nullify_end(ctx);
1652 }
1653 
1654 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1655                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1656 {
1657     TCGv_i32 dst;
1658     TCGv_i64 src;
1659 
1660     nullify_over(ctx);
1661     src = load_frd(ra);
1662     dst = tcg_temp_new_i32();
1663 
1664     func(dst, cpu_env, src);
1665 
1666     tcg_temp_free_i64(src);
1667     save_frw_i32(rt, dst);
1668     tcg_temp_free_i32(dst);
1669     return nullify_end(ctx);
1670 }
1671 
1672 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1673                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1674 {
1675     TCGv_i64 tmp;
1676 
1677     nullify_over(ctx);
1678     tmp = load_frd0(ra);
1679 
1680     func(tmp, cpu_env, tmp);
1681 
1682     save_frd(rt, tmp);
1683     tcg_temp_free_i64(tmp);
1684     return nullify_end(ctx);
1685 }
1686 
1687 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1688                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1689 {
1690     TCGv_i32 src;
1691     TCGv_i64 dst;
1692 
1693     nullify_over(ctx);
1694     src = load_frw0_i32(ra);
1695     dst = tcg_temp_new_i64();
1696 
1697     func(dst, cpu_env, src);
1698 
1699     tcg_temp_free_i32(src);
1700     save_frd(rt, dst);
1701     tcg_temp_free_i64(dst);
1702     return nullify_end(ctx);
1703 }
1704 
1705 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1706                         unsigned ra, unsigned rb,
1707                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1708 {
1709     TCGv_i32 a, b;
1710 
1711     nullify_over(ctx);
1712     a = load_frw0_i32(ra);
1713     b = load_frw0_i32(rb);
1714 
1715     func(a, cpu_env, a, b);
1716 
1717     tcg_temp_free_i32(b);
1718     save_frw_i32(rt, a);
1719     tcg_temp_free_i32(a);
1720     return nullify_end(ctx);
1721 }
1722 
1723 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1724                         unsigned ra, unsigned rb,
1725                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1726 {
1727     TCGv_i64 a, b;
1728 
1729     nullify_over(ctx);
1730     a = load_frd0(ra);
1731     b = load_frd0(rb);
1732 
1733     func(a, cpu_env, a, b);
1734 
1735     tcg_temp_free_i64(b);
1736     save_frd(rt, a);
1737     tcg_temp_free_i64(a);
1738     return nullify_end(ctx);
1739 }
1740 
1741 /* Emit an unconditional branch to a direct target, which may or may not
1742    have already had nullification handled.  */
1743 static bool do_dbranch(DisasContext *ctx, target_ureg dest,
1744                        unsigned link, bool is_n)
1745 {
1746     if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1747         if (link != 0) {
1748             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1749         }
1750         ctx->iaoq_n = dest;
1751         if (is_n) {
1752             ctx->null_cond.c = TCG_COND_ALWAYS;
1753         }
1754     } else {
1755         nullify_over(ctx);
1756 
1757         if (link != 0) {
1758             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1759         }
1760 
1761         if (is_n && use_nullify_skip(ctx)) {
1762             nullify_set(ctx, 0);
1763             gen_goto_tb(ctx, 0, dest, dest + 4);
1764         } else {
1765             nullify_set(ctx, is_n);
1766             gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1767         }
1768 
1769         nullify_end(ctx);
1770 
1771         nullify_set(ctx, 0);
1772         gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1773         ctx->base.is_jmp = DISAS_NORETURN;
1774     }
1775     return true;
1776 }
1777 
1778 /* Emit a conditional branch to a direct target.  If the branch itself
1779    is nullified, we should have already used nullify_over.  */
1780 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
1781                        DisasCond *cond)
1782 {
1783     target_ureg dest = iaoq_dest(ctx, disp);
1784     TCGLabel *taken = NULL;
1785     TCGCond c = cond->c;
1786     bool n;
1787 
1788     assert(ctx->null_cond.c == TCG_COND_NEVER);
1789 
1790     /* Handle TRUE and NEVER as direct branches.  */
1791     if (c == TCG_COND_ALWAYS) {
1792         return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1793     }
1794     if (c == TCG_COND_NEVER) {
1795         return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1796     }
1797 
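    /* A note on the ,n completer (a sketch of the architected rule as
       this code encodes it): the delay slot is nullified only when the
       branch is not taken with a backward displacement, or taken with a
       forward displacement; the two IS_N tests below implement exactly
       that pairing.  */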
1798     taken = gen_new_label();
1799     cond_prep(cond);
1800     tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
1801     cond_free(cond);
1802 
1803     /* Not taken: Condition not satisfied; nullify on backward branches. */
1804     n = is_n && disp < 0;
1805     if (n && use_nullify_skip(ctx)) {
1806         nullify_set(ctx, 0);
1807         gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1808     } else {
1809         if (!n && ctx->null_lab) {
1810             gen_set_label(ctx->null_lab);
1811             ctx->null_lab = NULL;
1812         }
1813         nullify_set(ctx, n);
1814         if (ctx->iaoq_n == -1) {
1815             /* The temporary iaoq_n_var died at the branch above.
1816                Regenerate it here instead of saving it.  */
1817             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1818         }
1819         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1820     }
1821 
1822     gen_set_label(taken);
1823 
1824     /* Taken: Condition satisfied; nullify on forward branches.  */
1825     n = is_n && disp >= 0;
1826     if (n && use_nullify_skip(ctx)) {
1827         nullify_set(ctx, 0);
1828         gen_goto_tb(ctx, 1, dest, dest + 4);
1829     } else {
1830         nullify_set(ctx, n);
1831         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1832     }
1833 
1834     /* Not taken: the branch itself was nullified.  */
1835     if (ctx->null_lab) {
1836         gen_set_label(ctx->null_lab);
1837         ctx->null_lab = NULL;
1838         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1839     } else {
1840         ctx->base.is_jmp = DISAS_NORETURN;
1841     }
1842     return true;
1843 }
1844 
1845 /* Emit an unconditional branch to an indirect target.  This handles
1846    nullification of the branch itself.  */
1847 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1848                        unsigned link, bool is_n)
1849 {
1850     TCGv_reg a0, a1, next, tmp;
1851     TCGCond c;
1852 
1853     assert(ctx->null_lab == NULL);
1854 
1855     if (ctx->null_cond.c == TCG_COND_NEVER) {
1856         if (link != 0) {
1857             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1858         }
1859         next = get_temp(ctx);
1860         tcg_gen_mov_reg(next, dest);
1861         if (is_n) {
1862             if (use_nullify_skip(ctx)) {
1863                 tcg_gen_mov_reg(cpu_iaoq_f, next);
1864                 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1865                 nullify_set(ctx, 0);
1866                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1867                 return true;
1868             }
1869             ctx->null_cond.c = TCG_COND_ALWAYS;
1870         }
1871         ctx->iaoq_n = -1;
1872         ctx->iaoq_n_var = next;
1873     } else if (is_n && use_nullify_skip(ctx)) {
1874         /* The (conditional) branch, B, nullifies the next insn, N,
1875            and we're allowed to skip execution of N (no single-step or
1876            tracepoint in effect).  Since the goto_ptr that we must use
1877            for the indirect branch consumes no special resources, we
1878            can (conditionally) skip B and continue execution.  */
1879         /* The use_nullify_skip test implies we have a known control path.  */
1880         tcg_debug_assert(ctx->iaoq_b != -1);
1881         tcg_debug_assert(ctx->iaoq_n != -1);
1882 
1883         /* We do have to handle the non-local temporary, DEST, before
1884            branching.  Since IAOQ_F is not really live at this point, we
1885            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1886         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1887         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1888 
1889         nullify_over(ctx);
1890         if (link != 0) {
1891             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1892         }
1893         tcg_gen_lookup_and_goto_ptr();
1894         return nullify_end(ctx);
1895     } else {
1896         cond_prep(&ctx->null_cond);
1897         c = ctx->null_cond.c;
1898         a0 = ctx->null_cond.a0;
1899         a1 = ctx->null_cond.a1;
1900 
1901         tmp = tcg_temp_new();
1902         next = get_temp(ctx);
1903 
1904         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1905         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1906         ctx->iaoq_n = -1;
1907         ctx->iaoq_n_var = next;
1908 
1909         if (link != 0) {
1910             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1911         }
1912 
1913         if (is_n) {
1914             /* The branch nullifies the next insn, which means the state of N
1915                after the branch is the inverse of the state of N that applied
1916                to the branch.  */
1917             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1918             cond_free(&ctx->null_cond);
1919             ctx->null_cond = cond_make_n();
1920             ctx->psw_n_nonzero = true;
1921         } else {
1922             cond_free(&ctx->null_cond);
1923         }
1924     }
1925     return true;
1926 }
1927 
1928 /* Implement
1929  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1930  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1931  *    else
1932  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1933  * which keeps the privilege level from being increased.
1934  */
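/* Worked example with illustrative values only: at ctx->privilege == 1,
   an offset 0x1000 (privilege field 0) is rewritten below to 0x1001;
   0x1001 is GTU 0x1000, so the rewrite wins and the increase to level 0
   is denied.  An offset 0x1003 (field 3) rewrites to 0x1001, which is
   not GTU 0x1003, so the original wins and the decrease to level 3 is
   allowed.  */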
1935 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1936 {
1937     TCGv_reg dest;
1938     switch (ctx->privilege) {
1939     case 0:
1940         /* Privilege 0 is maximum and is allowed to decrease.  */
1941         return offset;
1942     case 3:
1943         /* Privilege 3 is minimum and is never allowed to increase.  */
1944         dest = get_temp(ctx);
1945         tcg_gen_ori_reg(dest, offset, 3);
1946         break;
1947     default:
1948         dest = get_temp(ctx);
1949         tcg_gen_andi_reg(dest, offset, -4);
1950         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1951         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1953         break;
1954     }
1955     return dest;
1956 }
1957 
1958 #ifdef CONFIG_USER_ONLY
1959 /* On Linux, page zero is normally marked execute only + gateway.
1960    Therefore normal read or write is supposed to fail, but specific
1961    offsets have kernel code mapped to raise permissions to implement
1962    system calls.  Handling this via an explicit check here, rather
1963        than in the "be disp(sr2,r0)" instruction that probably sent us
1964    here, is the easiest way to handle the branch delay slot on the
1965    aforementioned BE.  */
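/* The offsets handled below appear to mirror the Linux parisc gateway
   page entry points: 0xb0 for light-weight syscalls, 0xe0 for setting
   the TLS pointer, and 0x100 for the main syscall entry.  */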
1966 static void do_page_zero(DisasContext *ctx)
1967 {
1968     /* If by some means we get here with PSW[N]=1, that implies that
1969        the B,GATE instruction would be skipped, and we'd fault on the
1970        next insn within the privileged page.  */
1971     switch (ctx->null_cond.c) {
1972     case TCG_COND_NEVER:
1973         break;
1974     case TCG_COND_ALWAYS:
1975         tcg_gen_movi_reg(cpu_psw_n, 0);
1976         goto do_sigill;
1977     default:
1978         /* Since this is always the first (and only) insn within the
1979            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1980         g_assert_not_reached();
1981     }
1982 
1983     /* Check that we didn't arrive here via some means that allowed
1984        non-sequential instruction execution.  Normally the PSW[B] bit
1985        detects this by disallowing the B,GATE instruction to execute
1986        under such conditions.  */
1987     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1988         goto do_sigill;
1989     }
1990 
1991     switch (ctx->iaoq_f & -4) {
1992     case 0x00: /* Null pointer call */
1993         gen_excp_1(EXCP_IMP);
1994         ctx->base.is_jmp = DISAS_NORETURN;
1995         break;
1996 
1997     case 0xb0: /* LWS */
1998         gen_excp_1(EXCP_SYSCALL_LWS);
1999         ctx->base.is_jmp = DISAS_NORETURN;
2000         break;
2001 
2002     case 0xe0: /* SET_THREAD_POINTER */
2003         tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2004         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2005         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2006         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2007         break;
2008 
2009     case 0x100: /* SYSCALL */
2010         gen_excp_1(EXCP_SYSCALL);
2011         ctx->base.is_jmp = DISAS_NORETURN;
2012         break;
2013 
2014     default:
2015     do_sigill:
2016         gen_excp_1(EXCP_ILL);
2017         ctx->base.is_jmp = DISAS_NORETURN;
2018         break;
2019     }
2020 }
2021 #endif
2022 
2023 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2024 {
2025     cond_free(&ctx->null_cond);
2026     return true;
2027 }
2028 
2029 static bool trans_break(DisasContext *ctx, arg_break *a)
2030 {
2031     return gen_excp_iir(ctx, EXCP_BREAK);
2032 }
2033 
2034 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2035 {
2036     /* No point in nullifying the memory barrier.  */
2037     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2038 
2039     cond_free(&ctx->null_cond);
2040     return true;
2041 }
2042 
2043 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2044 {
2045     unsigned rt = a->t;
2046     TCGv_reg tmp = dest_gpr(ctx, rt);
2047     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2048     save_gpr(ctx, rt, tmp);
2049 
2050     cond_free(&ctx->null_cond);
2051     return true;
2052 }
2053 
2054 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2055 {
2056     unsigned rt = a->t;
2057     unsigned rs = a->sp;
2058     TCGv_i64 t0 = tcg_temp_new_i64();
2059     TCGv_reg t1 = tcg_temp_new();
2060 
2061     load_spr(ctx, t0, rs);
2062     tcg_gen_shri_i64(t0, t0, 32);
2063     tcg_gen_trunc_i64_reg(t1, t0);
2064 
2065     save_gpr(ctx, rt, t1);
2066     tcg_temp_free(t1);
2067     tcg_temp_free_i64(t0);
2068 
2069     cond_free(&ctx->null_cond);
2070     return true;
2071 }
2072 
2073 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2074 {
2075     unsigned rt = a->t;
2076     unsigned ctl = a->r;
2077     TCGv_reg tmp;
2078 
2079     switch (ctl) {
2080     case CR_SAR:
2081 #ifdef TARGET_HPPA64
2082         if (a->e == 0) {
2083             /* MFSAR without ,W masks low 5 bits.  */
2084             tmp = dest_gpr(ctx, rt);
2085             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2086             save_gpr(ctx, rt, tmp);
2087             goto done;
2088         }
2089 #endif
2090         save_gpr(ctx, rt, cpu_sar);
2091         goto done;
2092     case CR_IT: /* Interval Timer */
2093         /* FIXME: Respect PSW_S bit.  */
2094         nullify_over(ctx);
2095         tmp = dest_gpr(ctx, rt);
2096         if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2097             gen_io_start();
2098             gen_helper_read_interval_timer(tmp);
2099             gen_io_end();
2100             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2101         } else {
2102             gen_helper_read_interval_timer(tmp);
2103         }
2104         save_gpr(ctx, rt, tmp);
2105         return nullify_end(ctx);
2106     case 26:
2107     case 27:
2108         break;
2109     default:
2110         /* All other control registers are privileged.  */
2111         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2112         break;
2113     }
2114 
2115     tmp = get_temp(ctx);
2116     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2117     save_gpr(ctx, rt, tmp);
2118 
2119  done:
2120     cond_free(&ctx->null_cond);
2121     return true;
2122 }
2123 
2124 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2125 {
2126     unsigned rr = a->r;
2127     unsigned rs = a->sp;
2128     TCGv_i64 t64;
2129 
2130     if (rs >= 5) {
2131         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2132     }
2133     nullify_over(ctx);
2134 
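    /* Space registers are modelled as i64 values with the space ID in
       the upper 32 bits; the left shift here mirrors the 32-bit right
       shift that trans_mfsp uses to read the value back.  */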
2135     t64 = tcg_temp_new_i64();
2136     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2137     tcg_gen_shli_i64(t64, t64, 32);
2138 
2139     if (rs >= 4) {
2140         tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2141         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2142     } else {
2143         tcg_gen_mov_i64(cpu_sr[rs], t64);
2144     }
2145     tcg_temp_free_i64(t64);
2146 
2147     return nullify_end(ctx);
2148 }
2149 
2150 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2151 {
2152     unsigned ctl = a->t;
2153     TCGv_reg reg = load_gpr(ctx, a->r);
2154     TCGv_reg tmp;
2155 
2156     if (ctl == CR_SAR) {
2157         tmp = tcg_temp_new();
2158         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2159         save_or_nullify(ctx, cpu_sar, tmp);
2160         tcg_temp_free(tmp);
2161 
2162         cond_free(&ctx->null_cond);
2163         return true;
2164     }
2165 
2166     /* All other control registers are privileged or read-only.  */
2167     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2168 
2169 #ifndef CONFIG_USER_ONLY
2170     nullify_over(ctx);
2171     switch (ctl) {
2172     case CR_IT:
2173         gen_helper_write_interval_timer(cpu_env, reg);
2174         break;
2175     case CR_EIRR:
2176         gen_helper_write_eirr(cpu_env, reg);
2177         break;
2178     case CR_EIEM:
2179         gen_helper_write_eiem(cpu_env, reg);
2180         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2181         break;
2182 
2183     case CR_IIASQ:
2184     case CR_IIAOQ:
2185         /* FIXME: Respect PSW_Q bit */
2186         /* The write advances the queue and stores to the back element.  */
2187         tmp = get_temp(ctx);
2188         tcg_gen_ld_reg(tmp, cpu_env,
2189                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2190         tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2191         tcg_gen_st_reg(reg, cpu_env,
2192                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2193         break;
2194 
2195     default:
2196         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2197         break;
2198     }
2199     return nullify_end(ctx);
2200 #endif
2201 }
2202 
2203 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2204 {
2205     TCGv_reg tmp = tcg_temp_new();
2206 
2207     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2208     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2209     save_or_nullify(ctx, cpu_sar, tmp);
2210     tcg_temp_free(tmp);
2211 
2212     cond_free(&ctx->null_cond);
2213     return true;
2214 }
2215 
2216 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2217 {
2218     TCGv_reg dest = dest_gpr(ctx, a->t);
2219 
2220 #ifdef CONFIG_USER_ONLY
2221     /* We don't implement space registers in user mode. */
2222     tcg_gen_movi_reg(dest, 0);
2223 #else
2224     TCGv_i64 t0 = tcg_temp_new_i64();
2225 
2226     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2227     tcg_gen_shri_i64(t0, t0, 32);
2228     tcg_gen_trunc_i64_reg(dest, t0);
2229 
2230     tcg_temp_free_i64(t0);
2231 #endif
2232     save_gpr(ctx, a->t, dest);
2233 
2234     cond_free(&ctx->null_cond);
2235     return true;
2236 }
2237 
2238 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2239 {
2240     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2241 #ifndef CONFIG_USER_ONLY
2242     TCGv_reg tmp;
2243 
2244     nullify_over(ctx);
2245 
2246     tmp = get_temp(ctx);
2247     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2248     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2249     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2250     save_gpr(ctx, a->t, tmp);
2251 
2252     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2253     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2254     return nullify_end(ctx);
2255 #endif
2256 }
2257 
2258 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2259 {
2260     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2261 #ifndef CONFIG_USER_ONLY
2262     TCGv_reg tmp;
2263 
2264     nullify_over(ctx);
2265 
2266     tmp = get_temp(ctx);
2267     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2268     tcg_gen_ori_reg(tmp, tmp, a->i);
2269     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2270     save_gpr(ctx, a->t, tmp);
2271 
2272     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2273     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2274     return nullify_end(ctx);
2275 #endif
2276 }
2277 
2278 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2279 {
2280     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2281 #ifndef CONFIG_USER_ONLY
2282     TCGv_reg tmp, reg;
2283     nullify_over(ctx);
2284 
2285     reg = load_gpr(ctx, a->r);
2286     tmp = get_temp(ctx);
2287     gen_helper_swap_system_mask(tmp, cpu_env, reg);
2288 
2289     /* Exit the TB to recognize new interrupts.  */
2290     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2291     return nullify_end(ctx);
2292 #endif
2293 }
2294 
2295 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2296 {
2297     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2298 #ifndef CONFIG_USER_ONLY
2299     nullify_over(ctx);
2300 
2301     if (rfi_r) {
2302         gen_helper_rfi_r(cpu_env);
2303     } else {
2304         gen_helper_rfi(cpu_env);
2305     }
2306     /* Exit the TB to recognize new interrupts.  */
2307     if (ctx->base.singlestep_enabled) {
2308         gen_excp_1(EXCP_DEBUG);
2309     } else {
2310         tcg_gen_exit_tb(NULL, 0);
2311     }
2312     ctx->base.is_jmp = DISAS_NORETURN;
2313 
2314     return nullify_end(ctx);
2315 #endif
2316 }
2317 
2318 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2319 {
2320     return do_rfi(ctx, false);
2321 }
2322 
2323 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2324 {
2325     return do_rfi(ctx, true);
2326 }
2327 
2328 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2329 {
2330     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2331 #ifndef CONFIG_USER_ONLY
2332     nullify_over(ctx);
2333     gen_helper_halt(cpu_env);
2334     ctx->base.is_jmp = DISAS_NORETURN;
2335     return nullify_end(ctx);
2336 #endif
2337 }
2338 
2339 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2340 {
2341     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2342 #ifndef CONFIG_USER_ONLY
2343     nullify_over(ctx);
2344     gen_helper_reset(cpu_env);
2345     ctx->base.is_jmp = DISAS_NORETURN;
2346     return nullify_end(ctx);
2347 #endif
2348 }
2349 
2350 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2351 {
2352     if (a->m) {
2353         TCGv_reg dest = dest_gpr(ctx, a->b);
2354         TCGv_reg src1 = load_gpr(ctx, a->b);
2355         TCGv_reg src2 = load_gpr(ctx, a->x);
2356 
2357         /* The only thing we need to do is the base register modification.  */
2358         tcg_gen_add_reg(dest, src1, src2);
2359         save_gpr(ctx, a->b, dest);
2360     }
2361     cond_free(&ctx->null_cond);
2362     return true;
2363 }
2364 
2365 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2366 {
2367     TCGv_reg dest, ofs;
2368     TCGv_i32 level, want;
2369     TCGv_tl addr;
2370 
2371     nullify_over(ctx);
2372 
2373     dest = dest_gpr(ctx, a->t);
2374     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2375 
2376     if (a->imm) {
2377         level = tcg_const_i32(a->ri);
2378     } else {
2379         level = tcg_temp_new_i32();
2380         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2381         tcg_gen_andi_i32(level, level, 3);
2382     }
2383     want = tcg_const_i32(a->write ? PAGE_WRITE : PAGE_READ);
2384 
2385     gen_helper_probe(dest, cpu_env, addr, level, want);
2386 
2387     tcg_temp_free_i32(want);
2388     tcg_temp_free_i32(level);
2389 
2390     save_gpr(ctx, a->t, dest);
2391     return nullify_end(ctx);
2392 }
2393 
2394 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2395 {
2396     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2397 #ifndef CONFIG_USER_ONLY
2398     TCGv_tl addr;
2399     TCGv_reg ofs, reg;
2400 
2401     nullify_over(ctx);
2402 
2403     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2404     reg = load_gpr(ctx, a->r);
2405     if (a->addr) {
2406         gen_helper_itlba(cpu_env, addr, reg);
2407     } else {
2408         gen_helper_itlbp(cpu_env, addr, reg);
2409     }
2410 
2411     /* Exit TB for ITLB change if mmu is enabled.  This *should* not be
2412        the case, since the OS TLB fill handler runs with mmu disabled.  */
2413     if (!a->data && (ctx->tb_flags & PSW_C)) {
2414         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2415     }
2416     return nullify_end(ctx);
2417 #endif
2418 }
2419 
2420 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2421 {
2422     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2423 #ifndef CONFIG_USER_ONLY
2424     TCGv_tl addr;
2425     TCGv_reg ofs;
2426 
2427     nullify_over(ctx);
2428 
2429     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2430     if (a->m) {
2431         save_gpr(ctx, a->b, ofs);
2432     }
2433     if (a->local) {
2434         gen_helper_ptlbe(cpu_env);
2435     } else {
2436         gen_helper_ptlb(cpu_env, addr);
2437     }
2438 
2439     /* Exit TB for TLB change if mmu is enabled.  */
2440     if (!a->data && (ctx->tb_flags & PSW_C)) {
2441         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2442     }
2443     return nullify_end(ctx);
2444 #endif
2445 }
2446 
2447 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2448 {
2449     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2450 #ifndef CONFIG_USER_ONLY
2451     TCGv_tl vaddr;
2452     TCGv_reg ofs, paddr;
2453 
2454     nullify_over(ctx);
2455 
2456     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2457 
2458     paddr = tcg_temp_new();
2459     gen_helper_lpa(paddr, cpu_env, vaddr);
2460 
2461     /* Note that physical address result overrides base modification.  */
2462     if (a->m) {
2463         save_gpr(ctx, a->b, ofs);
2464     }
2465     save_gpr(ctx, a->t, paddr);
2466     tcg_temp_free(paddr);
2467 
2468     return nullify_end(ctx);
2469 #endif
2470 }
2471 
2472 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2473 {
2474     TCGv_reg ci;
2475 
2476     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2477 
2478     /* The Coherence Index is an implementation-defined function of the
2479        physical address.  Two addresses with the same CI have a coherent
2480        view of the cache.  Our implementation is to return 0 for all,
2481        since the entire address space is coherent.  */
2482     ci = tcg_const_reg(0);
2483     save_gpr(ctx, a->t, ci);
2484     tcg_temp_free(ci);
2485 
2486     cond_free(&ctx->null_cond);
2487     return true;
2488 }
2489 
2490 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2491 {
2492     return do_add_reg(ctx, a, false, false, false, false);
2493 }
2494 
2495 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2496 {
2497     return do_add_reg(ctx, a, true, false, false, false);
2498 }
2499 
2500 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2501 {
2502     return do_add_reg(ctx, a, false, true, false, false);
2503 }
2504 
2505 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2506 {
2507     return do_add_reg(ctx, a, false, false, false, true);
2508 }
2509 
2510 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2511 {
2512     return do_add_reg(ctx, a, false, true, false, true);
2513 }
2514 
2515 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2516 {
2517     return do_sub_reg(ctx, a, false, false, false);
2518 }
2519 
2520 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2521 {
2522     return do_sub_reg(ctx, a, true, false, false);
2523 }
2524 
2525 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2526 {
2527     return do_sub_reg(ctx, a, false, false, true);
2528 }
2529 
2530 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2531 {
2532     return do_sub_reg(ctx, a, true, false, true);
2533 }
2534 
2535 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2536 {
2537     return do_sub_reg(ctx, a, false, true, false);
2538 }
2539 
2540 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2541 {
2542     return do_sub_reg(ctx, a, true, true, false);
2543 }
2544 
2545 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2546 {
2547     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2548 }
2549 
2550 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2551 {
2552     return do_log_reg(ctx, a, tcg_gen_and_reg);
2553 }
2554 
2555 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2556 {
2557     if (a->cf == 0) {
2558         unsigned r2 = a->r2;
2559         unsigned r1 = a->r1;
2560         unsigned rt = a->t;
2561 
2562         if (rt == 0) { /* NOP */
2563             cond_free(&ctx->null_cond);
2564             return true;
2565         }
2566         if (r2 == 0) { /* COPY */
2567             if (r1 == 0) {
2568                 TCGv_reg dest = dest_gpr(ctx, rt);
2569                 tcg_gen_movi_reg(dest, 0);
2570                 save_gpr(ctx, rt, dest);
2571             } else {
2572                 save_gpr(ctx, rt, cpu_gr[r1]);
2573             }
2574             cond_free(&ctx->null_cond);
2575             return true;
2576         }
2577 #ifndef CONFIG_USER_ONLY
2578         /* These are QEMU extensions and are nops in the real architecture:
2579          *
2580          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2581          * or %r31,%r31,%r31 -- death loop; offline cpu
2582          *                      currently implemented as idle.
2583          */
2584         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2585             TCGv_i32 tmp;
2586 
2587             /* No need to check for supervisor, as userland can only pause
2588                until the next timer interrupt.  */
2589             nullify_over(ctx);
2590 
2591             /* Advance the instruction queue.  */
2592             copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2593             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2594             nullify_set(ctx, 0);
2595 
2596             /* Tell the qemu main loop to halt until this cpu has work.  */
2597             tmp = tcg_const_i32(1);
2598             tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2599                                          offsetof(CPUState, halted));
2600             tcg_temp_free_i32(tmp);
2601             gen_excp_1(EXCP_HALTED);
2602             ctx->base.is_jmp = DISAS_NORETURN;
2603 
2604             return nullify_end(ctx);
2605         }
2606 #endif
2607     }
2608     return do_log_reg(ctx, a, tcg_gen_or_reg);
2609 }
2610 
2611 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2612 {
2613     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2614 }
2615 
2616 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2617 {
2618     TCGv_reg tcg_r1, tcg_r2;
2619 
2620     if (a->cf) {
2621         nullify_over(ctx);
2622     }
2623     tcg_r1 = load_gpr(ctx, a->r1);
2624     tcg_r2 = load_gpr(ctx, a->r2);
2625     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2626     return nullify_end(ctx);
2627 }
2628 
2629 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2630 {
2631     TCGv_reg tcg_r1, tcg_r2;
2632 
2633     if (a->cf) {
2634         nullify_over(ctx);
2635     }
2636     tcg_r1 = load_gpr(ctx, a->r1);
2637     tcg_r2 = load_gpr(ctx, a->r2);
2638     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2639     return nullify_end(ctx);
2640 }
2641 
2642 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2643 {
2644     TCGv_reg tcg_r1, tcg_r2, tmp;
2645 
2646     if (a->cf) {
2647         nullify_over(ctx);
2648     }
2649     tcg_r1 = load_gpr(ctx, a->r1);
2650     tcg_r2 = load_gpr(ctx, a->r2);
2651     tmp = get_temp(ctx);
2652     tcg_gen_not_reg(tmp, tcg_r2);
2653     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2654     return nullify_end(ctx);
2655 }
2656 
2657 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2658 {
2659     return do_uaddcm(ctx, a, false);
2660 }
2661 
2662 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2663 {
2664     return do_uaddcm(ctx, a, true);
2665 }
2666 
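/* Sketch of the decimal correction below: after a BCD add, bit 3 of each
   carry nibble in PSW[CB] records a per-digit carry.  Masking with
   0x11111111 and multiplying by 6 turns those flags into a 0x6-per-digit
   correction, e.g. carries out of digits 0 and 4 give
   0x00010001 * 6 = 0x00060006.  For DCOR the flags are inverted and the
   correction subtracted; for IDCOR it is added directly.  */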
2667 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2668 {
2669     TCGv_reg tmp;
2670 
2671     nullify_over(ctx);
2672 
2673     tmp = get_temp(ctx);
2674     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2675     if (!is_i) {
2676         tcg_gen_not_reg(tmp, tmp);
2677     }
2678     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2679     tcg_gen_muli_reg(tmp, tmp, 6);
2680     do_unit(ctx, a->t, tmp, load_gpr(ctx, a->r), a->cf, false,
2681             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2682     return nullify_end(ctx);
2683 }
2684 
2685 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2686 {
2687     return do_dcor(ctx, a, false);
2688 }
2689 
2690 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2691 {
2692     return do_dcor(ctx, a, true);
2693 }
2694 
2695 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2696 {
2697     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2698 
2699     nullify_over(ctx);
2700 
2701     in1 = load_gpr(ctx, a->r1);
2702     in2 = load_gpr(ctx, a->r2);
2703 
2704     add1 = tcg_temp_new();
2705     add2 = tcg_temp_new();
2706     addc = tcg_temp_new();
2707     dest = tcg_temp_new();
2708     zero = tcg_const_reg(0);
2709 
2710     /* Form R1 << 1 | PSW[CB]{8}.  */
2711     tcg_gen_add_reg(add1, in1, in1);
2712     tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2713 
2714     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2715        carry{8} requires that we subtract via + ~R2 + 1, as described in
2716        the manual.  By extracting and masking V, we can produce the
2717        proper inputs to the addition without movcond.  */
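    /* Concretely: the SARI replicates the V bit (kept in the MSB of
       cpu_psw_v) across ADDC, so ADD2 becomes ~R2 and the (ADDC & 1)
       term added below supplies the +1 of the negation; with V clear,
       ADDC is zero and R2 passes through unchanged.  */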
2718     tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2719     tcg_gen_xor_reg(add2, in2, addc);
2720     tcg_gen_andi_reg(addc, addc, 1);
2721     /* ??? This is only correct for 32-bit.  */
2722     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2723     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2724 
2725     tcg_temp_free(addc);
2726     tcg_temp_free(zero);
2727 
2728     /* Write back the result register.  */
2729     save_gpr(ctx, a->t, dest);
2730 
2731     /* Write back PSW[CB].  */
2732     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2733     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2734 
2735     /* Write back PSW[V] for the division step.  */
2736     tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2737     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2738 
2739     /* Install the new nullification.  */
2740     if (a->cf) {
2741         TCGv_reg sv = NULL;
2742         if (a->cf >> 1 == 6) {
2743             /* ??? The lshift is supposed to contribute to overflow.  */
2744             sv = do_add_sv(ctx, dest, add1, add2);
2745         }
2746         ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2747     }
2748 
2749     tcg_temp_free(add1);
2750     tcg_temp_free(add2);
2751     tcg_temp_free(dest);
2752 
2753     return nullify_end(ctx);
2754 }
2755 
2756 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2757 {
2758     return do_add_imm(ctx, a, false, false);
2759 }
2760 
2761 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2762 {
2763     return do_add_imm(ctx, a, true, false);
2764 }
2765 
2766 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2767 {
2768     return do_add_imm(ctx, a, false, true);
2769 }
2770 
2771 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2772 {
2773     return do_add_imm(ctx, a, true, true);
2774 }
2775 
2776 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2777 {
2778     return do_sub_imm(ctx, a, false);
2779 }
2780 
2781 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2782 {
2783     return do_sub_imm(ctx, a, true);
2784 }
2785 
2786 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2787 {
2788     TCGv_reg tcg_im, tcg_r2;
2789 
2790     if (a->cf) {
2791         nullify_over(ctx);
2792     }
2793 
2794     tcg_im = load_const(ctx, a->i);
2795     tcg_r2 = load_gpr(ctx, a->r);
2796     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2797 
2798     return nullify_end(ctx);
2799 }
2800 
2801 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2802 {
2803     return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2804                    a->disp, a->sp, a->m, a->size | MO_TE);
2805 }
2806 
2807 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2808 {
2809     assert(a->x == 0 && a->scale == 0);
2810     return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2811 }
2812 
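/* LDCW/LDCD (load and clear) is modelled as an atomic exchange with
   zero; MO_ALIGN_16 reflects the architectural requirement that the
   LDCW operand be 16-byte aligned.  */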
2813 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2814 {
2815     TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
2816     TCGv_reg zero, dest, ofs;
2817     TCGv_tl addr;
2818 
2819     nullify_over(ctx);
2820 
2821     if (a->m) {
2822         /* Base register modification.  Make sure if RT == RB,
2823            we see the result of the load.  */
2824         dest = get_temp(ctx);
2825     } else {
2826         dest = dest_gpr(ctx, a->t);
2827     }
2828 
2829     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2830              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2831     zero = tcg_const_reg(0);
2832     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2833     if (a->m) {
2834         save_gpr(ctx, a->b, ofs);
2835     }
2836     save_gpr(ctx, a->t, dest);
2837 
2838     return nullify_end(ctx);
2839 }
2840 
2841 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2842 {
2843     TCGv_reg ofs, val;
2844     TCGv_tl addr;
2845 
2846     nullify_over(ctx);
2847 
2848     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2849              ctx->mmu_idx == MMU_PHYS_IDX);
2850     val = load_gpr(ctx, a->r);
2851     if (a->a) {
2852         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2853             gen_helper_stby_e_parallel(cpu_env, addr, val);
2854         } else {
2855             gen_helper_stby_e(cpu_env, addr, val);
2856         }
2857     } else {
2858         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2859             gen_helper_stby_b_parallel(cpu_env, addr, val);
2860         } else {
2861             gen_helper_stby_b(cpu_env, addr, val);
2862         }
2863     }
2864     if (a->m) {
2865         tcg_gen_andi_reg(ofs, ofs, ~3);
2866         save_gpr(ctx, a->b, ofs);
2867     }
2868 
2869     return nullify_end(ctx);
2870 }
2871 
2872 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2873 {
2874     int hold_mmu_idx = ctx->mmu_idx;
2875 
2876     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2877     ctx->mmu_idx = MMU_PHYS_IDX;
2878     trans_ld(ctx, a);
2879     ctx->mmu_idx = hold_mmu_idx;
2880     return true;
2881 }
2882 
2883 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2884 {
2885     int hold_mmu_idx = ctx->mmu_idx;
2886 
2887     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2888     ctx->mmu_idx = MMU_PHYS_IDX;
2889     trans_st(ctx, a);
2890     ctx->mmu_idx = hold_mmu_idx;
2891     return true;
2892 }
2893 
2894 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2895 {
2896     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2897 
2898     tcg_gen_movi_reg(tcg_rt, a->i);
2899     save_gpr(ctx, a->t, tcg_rt);
2900     cond_free(&ctx->null_cond);
2901     return true;
2902 }
2903 
2904 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2905 {
2906     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2907     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2908 
2909     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2910     save_gpr(ctx, 1, tcg_r1);
2911     cond_free(&ctx->null_cond);
2912     return true;
2913 }
2914 
2915 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2916 {
2917     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2918 
2919     /* Special case rb == 0, for the LDI pseudo-op.
2920        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
2921     if (a->b == 0) {
2922         tcg_gen_movi_reg(tcg_rt, a->i);
2923     } else {
2924         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2925     }
2926     save_gpr(ctx, a->t, tcg_rt);
2927     cond_free(&ctx->null_cond);
2928     return true;
2929 }
2930 
2931 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2932                     unsigned c, unsigned f, unsigned n, int disp)
2933 {
2934     TCGv_reg dest, in2, sv;
2935     DisasCond cond;
2936 
2937     in2 = load_gpr(ctx, r);
2938     dest = get_temp(ctx);
2939 
2940     tcg_gen_sub_reg(dest, in1, in2);
2941 
2942     sv = NULL;
2943     if (c == 6) {
2944         sv = do_sub_sv(ctx, dest, in1, in2);
2945     }
2946 
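    /* The packed index C * 2 + F feeds do_sub_cond: C selects the
       comparison and F negates its sense.  */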
2947     cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
2948     return do_cbranch(ctx, disp, n, &cond);
2949 }
2950 
2951 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
2952 {
2953     nullify_over(ctx);
2954     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
2955 }
2956 
2957 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
2958 {
2959     nullify_over(ctx);
2960     return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
2961 }
2962 
2963 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2964                     unsigned c, unsigned f, unsigned n, int disp)
2965 {
2966     TCGv_reg dest, in2, sv, cb_msb;
2967     DisasCond cond;
2968 
2969     in2 = load_gpr(ctx, r);
2970     dest = dest_gpr(ctx, r);
2971     sv = NULL;
2972     cb_msb = NULL;
2973 
2974     switch (c) {
2975     default:
2976         tcg_gen_add_reg(dest, in1, in2);
2977         break;
2978     case 4: case 5:
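        /* With both high parts zero, add2 leaves IN1 + IN2 in DEST and
           the carry-out of the addition in CB_MSB.  */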
2979         cb_msb = get_temp(ctx);
2980         tcg_gen_movi_reg(cb_msb, 0);
2981         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
2982         break;
2983     case 6:
2984         tcg_gen_add_reg(dest, in1, in2);
2985         sv = do_add_sv(ctx, dest, in1, in2);
2986         break;
2987     }
2988 
2989     cond = do_cond(c * 2 + f, dest, cb_msb, sv);
2990     return do_cbranch(ctx, disp, n, &cond);
2991 }
2992 
2993 static bool trans_addb(DisasContext *ctx, arg_addb *a)
2994 {
2995     nullify_over(ctx);
2996     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
2997 }
2998 
2999 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3000 {
3001     nullify_over(ctx);
3002     return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3003 }
3004 
3005 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3006 {
3007     TCGv_reg tmp, tcg_r;
3008     DisasCond cond;
3009 
3010     nullify_over(ctx);
3011 
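    /* Shifting left by SAR (big-endian bit numbering) moves the selected
       bit into the sign position, so the branch-on-bit test reduces to a
       sign test: LT for bit set, GE for bit clear.  */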
3012     tmp = tcg_temp_new();
3013     tcg_r = load_gpr(ctx, a->r);
3014     tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3015 
3016     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3017     tcg_temp_free(tmp);
3018     return do_cbranch(ctx, a->disp, a->n, &cond);
3019 }
3020 
3021 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3022 {
3023     TCGv_reg tmp, tcg_r;
3024     DisasCond cond;
3025 
3026     nullify_over(ctx);
3027 
3028     tmp = tcg_temp_new();
3029     tcg_r = load_gpr(ctx, a->r);
3030     tcg_gen_shli_reg(tmp, tcg_r, a->p);
3031 
3032     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3033     tcg_temp_free(tmp);
3034     return do_cbranch(ctx, a->disp, a->n, &cond);
3035 }
3036 
3037 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3038 {
3039     TCGv_reg dest;
3040     DisasCond cond;
3041 
3042     nullify_over(ctx);
3043 
3044     dest = dest_gpr(ctx, a->r2);
3045     if (a->r1 == 0) {
3046         tcg_gen_movi_reg(dest, 0);
3047     } else {
3048         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3049     }
3050 
3051     cond = do_sed_cond(a->c, dest);
3052     return do_cbranch(ctx, a->disp, a->n, &cond);
3053 }
3054 
3055 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3056 {
3057     TCGv_reg dest;
3058     DisasCond cond;
3059 
3060     nullify_over(ctx);
3061 
3062     dest = dest_gpr(ctx, a->r);
3063     tcg_gen_movi_reg(dest, a->i);
3064 
3065     cond = do_sed_cond(a->c, dest);
3066     return do_cbranch(ctx, a->disp, a->n, &cond);
3067 }
3068 
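/* SHRPW concatenates R1 (high) and R2 (low) into a double word and
   shifts it right by SAR, keeping the low 32 bits.  The special cases
   below avoid the full 64-bit form: R1 == 0 degenerates to a plain
   right shift of R2, and R1 == R2 to a 32-bit rotate.  */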
3069 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3070 {
3071     TCGv_reg dest;
3072 
3073     if (a->c) {
3074         nullify_over(ctx);
3075     }
3076 
3077     dest = dest_gpr(ctx, a->t);
3078     if (a->r1 == 0) {
3079         tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3080         tcg_gen_shr_reg(dest, dest, cpu_sar);
3081     } else if (a->r1 == a->r2) {
3082         TCGv_i32 t32 = tcg_temp_new_i32();
3083         tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3084         tcg_gen_rotr_i32(t32, t32, cpu_sar);
3085         tcg_gen_extu_i32_reg(dest, t32);
3086         tcg_temp_free_i32(t32);
3087     } else {
3088         TCGv_i64 t = tcg_temp_new_i64();
3089         TCGv_i64 s = tcg_temp_new_i64();
3090 
3091         tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3092         tcg_gen_extu_reg_i64(s, cpu_sar);
3093         tcg_gen_shr_i64(t, t, s);
3094         tcg_gen_trunc_i64_reg(dest, t);
3095 
3096         tcg_temp_free_i64(t);
3097         tcg_temp_free_i64(s);
3098     }
3099     save_gpr(ctx, a->t, dest);
3100 
3101     /* Install the new nullification.  */
3102     cond_free(&ctx->null_cond);
3103     if (a->c) {
3104         ctx->null_cond = do_sed_cond(a->c, dest);
3105     }
3106     return nullify_end(ctx);
3107 }
3108 
3109 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3110 {
3111     unsigned sa = 31 - a->cpos;
3112     TCGv_reg dest, t2;
3113 
3114     if (a->c) {
3115         nullify_over(ctx);
3116     }
3117 
3118     dest = dest_gpr(ctx, a->t);
3119     t2 = load_gpr(ctx, a->r2);
3120     if (a->r1 == a->r2) {
3121         TCGv_i32 t32 = tcg_temp_new_i32();
3122         tcg_gen_trunc_reg_i32(t32, t2);
3123         tcg_gen_rotri_i32(t32, t32, sa);
3124         tcg_gen_extu_i32_reg(dest, t32);
3125         tcg_temp_free_i32(t32);
3126     } else if (a->r1 == 0) {
3127         tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3128     } else {
3129         TCGv_reg t0 = tcg_temp_new();
3130         tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3131         tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
3132         tcg_temp_free(t0);
3133     }
3134     save_gpr(ctx, a->t, dest);
3135 
3136     /* Install the new nullification.  */
3137     cond_free(&ctx->null_cond);
3138     if (a->c) {
3139         ctx->null_cond = do_sed_cond(a->c, dest);
3140     }
3141     return nullify_end(ctx);
3142 }
3143 
3144 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3145 {
3146     unsigned len = 32 - a->clen;
3147     TCGv_reg dest, src, tmp;
3148 
3149     if (a->c) {
3150         nullify_over(ctx);
3151     }
3152 
3153     dest = dest_gpr(ctx, a->t);
3154     src = load_gpr(ctx, a->r);
3155     tmp = tcg_temp_new();
3156 
3157     /* Recall that SAR is using big-endian bit numbering.  */
3158     tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3159     if (a->se) {
3160         tcg_gen_sar_reg(dest, src, tmp);
3161         tcg_gen_sextract_reg(dest, dest, 0, len);
3162     } else {
3163         tcg_gen_shr_reg(dest, src, tmp);
3164         tcg_gen_extract_reg(dest, dest, 0, len);
3165     }
3166     tcg_temp_free(tmp);
3167     save_gpr(ctx, a->t, dest);
3168 
3169     /* Install the new nullification.  */
3170     cond_free(&ctx->null_cond);
3171     if (a->c) {
3172         ctx->null_cond = do_sed_cond(a->c, dest);
3173     }
3174     return nullify_end(ctx);
3175 }
3176 
3177 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3178 {
3179     unsigned len = 32 - a->clen;
3180     unsigned cpos = 31 - a->pos;
3181     TCGv_reg dest, src;
3182 
3183     if (a->c) {
3184         nullify_over(ctx);
3185     }
3186 
3187     dest = dest_gpr(ctx, a->t);
3188     src = load_gpr(ctx, a->r);
3189     if (a->se) {
3190         tcg_gen_sextract_reg(dest, src, cpos, len);
3191     } else {
3192         tcg_gen_extract_reg(dest, src, cpos, len);
3193     }
3194     save_gpr(ctx, a->t, dest);
3195 
3196     /* Install the new nullification.  */
3197     cond_free(&ctx->null_cond);
3198     if (a->c) {
3199         ctx->null_cond = do_sed_cond(a->c, dest);
3200     }
3201     return nullify_end(ctx);
3202 }
3203 
3204 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3205 {
3206     unsigned len = 32 - a->clen;
3207     target_sreg mask0, mask1;
3208     TCGv_reg dest;
3209 
3210     if (a->c) {
3211         nullify_over(ctx);
3212     }
3213     if (a->cpos + len > 32) {
3214         len = 32 - a->cpos;
3215     }
3216 
3217     dest = dest_gpr(ctx, a->t);
3218     mask0 = deposit64(0, a->cpos, len, a->i);
3219     mask1 = deposit64(-1, a->cpos, len, a->i);
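    /* (SRC & MASK1) | MASK0 leaves SRC untouched outside the field and
       exactly I inside it: MASK1 clears the field bits that are 0 in I,
       and MASK0 sets those that are 1.  */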
3220 
3221     if (a->nz) {
3222         TCGv_reg src = load_gpr(ctx, a->t);
3223         if (mask1 != -1) {
3224             tcg_gen_andi_reg(dest, src, mask1);
3225             src = dest;
3226         }
3227         tcg_gen_ori_reg(dest, src, mask0);
3228     } else {
3229         tcg_gen_movi_reg(dest, mask0);
3230     }
3231     save_gpr(ctx, a->t, dest);
3232 
3233     /* Install the new nullification.  */
3234     cond_free(&ctx->null_cond);
3235     if (a->c) {
3236         ctx->null_cond = do_sed_cond(a->c, dest);
3237     }
3238     return nullify_end(ctx);
3239 }
3240 
3241 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3242 {
3243     unsigned rs = a->nz ? a->t : 0;
3244     unsigned len = 32 - a->clen;
3245     TCGv_reg dest, val;
3246 
3247     if (a->c) {
3248         nullify_over(ctx);
3249     }
3250     if (a->cpos + len > 32) {
3251         len = 32 - a->cpos;
3252     }
3253 
3254     dest = dest_gpr(ctx, a->t);
3255     val = load_gpr(ctx, a->r);
3256     if (rs == 0) {
3257         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3258     } else {
3259         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3260     }
3261     save_gpr(ctx, a->t, dest);
3262 
3263     /* Install the new nullification.  */
3264     cond_free(&ctx->null_cond);
3265     if (a->c) {
3266         ctx->null_cond = do_sed_cond(a->c, dest);
3267     }
3268     return nullify_end(ctx);
3269 }
3270 
3271 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3272                         unsigned nz, unsigned clen, TCGv_reg val)
3273 {
3274     unsigned rs = nz ? rt : 0;
3275     unsigned len = 32 - clen;
3276     TCGv_reg mask, tmp, shift, dest;
3277     unsigned msb = 1U << (len - 1);
3278 
3279     if (c) {
3280         nullify_over(ctx);
3281     }
3282 
3283     dest = dest_gpr(ctx, rt);
3284     shift = tcg_temp_new();
3285     tmp = tcg_temp_new();
3286 
3287     /* Convert big-endian bit numbering in SAR to left-shift.  */
3288     tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3289 
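    /* MSB + (MSB - 1) equals (1 << LEN) - 1, a mask of the LEN low bits,
       computed without shifting by LEN itself (which would be out of
       range when LEN is 32).  */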
3290     mask = tcg_const_reg(msb + (msb - 1));
3291     tcg_gen_and_reg(tmp, val, mask);
3292     if (rs) {
3293         tcg_gen_shl_reg(mask, mask, shift);
3294         tcg_gen_shl_reg(tmp, tmp, shift);
3295         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3296         tcg_gen_or_reg(dest, dest, tmp);
3297     } else {
3298         tcg_gen_shl_reg(dest, tmp, shift);
3299     }
3300     tcg_temp_free(shift);
3301     tcg_temp_free(mask);
3302     tcg_temp_free(tmp);
3303     save_gpr(ctx, rt, dest);
3304 
3305     /* Install the new nullification.  */
3306     cond_free(&ctx->null_cond);
3307     if (c) {
3308         ctx->null_cond = do_sed_cond(c, dest);
3309     }
3310     return nullify_end(ctx);
3311 }
3312 
3313 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3314 {
3315     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3316 }
3317 
3318 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3319 {
3320     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3321 }
3322 
3323 static bool trans_be(DisasContext *ctx, arg_be *a)
3324 {
3325     TCGv_reg tmp;
3326 
3327 #ifdef CONFIG_USER_ONLY
3328     /* ??? It seems like there should be a good way of using
3329        "be disp(sr2, r0)", the canonical gateway entry mechanism
3330        to our advantage.  But that appears to be inconvenient to
3331        manage alongside branch delay slots.  Therefore we handle
3332        entry into the gateway page via absolute address.  */
3333     /* Since we don't implement spaces, just branch.  Do notice the special
3334        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3335        goto_tb to the TB containing the syscall.  */
3336     if (a->b == 0) {
3337         return do_dbranch(ctx, a->disp, a->l, a->n);
3338     }
3339 #else
3340     nullify_over(ctx);
3341 #endif
3342 
3343     tmp = get_temp(ctx);
3344     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3345     tmp = do_ibranch_priv(ctx, tmp);
3346 
3347 #ifdef CONFIG_USER_ONLY
3348     return do_ibranch(ctx, tmp, a->l, a->n);
3349 #else
3350     TCGv_i64 new_spc = tcg_temp_new_i64();
3351 
3352     load_spr(ctx, new_spc, a->sp);
3353     if (a->l) {
3354         copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3355         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3356     }
3357     if (a->n && use_nullify_skip(ctx)) {
3358         tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3359         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3360         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3361         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3362     } else {
3363         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3364         if (ctx->iaoq_b == -1) {
3365             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3366         }
3367         tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3368         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3369         nullify_set(ctx, a->n);
3370     }
3371     tcg_temp_free_i64(new_spc);
3372     tcg_gen_lookup_and_goto_ptr();
3373     ctx->base.is_jmp = DISAS_NORETURN;
3374     return nullify_end(ctx);
3375 #endif
3376 }
3377 
3378 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3379 {
3380     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3381 }
3382 
3383 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3384 {
3385     target_ureg dest = iaoq_dest(ctx, a->disp);
3386 
3387     /* Make sure the caller hasn't done something weird with the queue.
3388      * ??? This is not quite the same as the PSW[B] bit, which would be
3389      * expensive to track.  Real hardware will trap for
3390      *    b  gateway
3391      *    b  gateway+4  (in delay slot of first branch)
3392      * However, checking for a non-sequential instruction queue *will*
3393      * diagnose the security hole
3394      *    b  gateway
3395      *    b  evil
3396      * in which instructions at evil would run with increased privs.
3397      */
3398     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3399         return gen_illegal(ctx);
3400     }
3401 
3402 #ifndef CONFIG_USER_ONLY
3403     if (ctx->tb_flags & PSW_C) {
3404         CPUHPPAState *env = ctx->cs->env_ptr;
3405         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3406         /* If we could not find a TLB entry, then we need to generate an
3407            ITLB miss exception so the kernel will provide it.
3408            The resulting TLB fill operation will invalidate this TB and
3409            we will re-translate, at which point we *will* be able to find
3410            the TLB entry and determine if this is in fact a gateway page.  */
3411         if (type < 0) {
3412             gen_excp(ctx, EXCP_ITLB_MISS);
3413             return true;
3414         }
3415         /* No change for non-gateway pages or for priv decrease.  */
3416         if (type >= 4 && type - 4 < ctx->privilege) {
3417             dest = deposit32(dest, 0, 2, type - 4);
3418         }
3419     } else {
3420         dest &= -4;  /* priv = 0 */
3421     }
3422 #endif
3423 
3424     return do_dbranch(ctx, dest, a->l, a->n);
3425 }
3426 
3427 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3428 {
3429     TCGv_reg tmp = get_temp(ctx);
3430 
3431     tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3432     tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3433     /* The computation here never changes privilege level.  */
3434     return do_ibranch(ctx, tmp, a->l, a->n);
3435 }
3436 
3437 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3438 {
3439     TCGv_reg dest;
3440 
3441     if (a->x == 0) {
3442         dest = load_gpr(ctx, a->b);
3443     } else {
3444         dest = get_temp(ctx);
3445         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3446         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3447     }
3448     dest = do_ibranch_priv(ctx, dest);
3449     return do_ibranch(ctx, dest, 0, a->n);
3450 }
3451 
3452 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3453 {
3454     TCGv_reg dest;
3455 
3456 #ifdef CONFIG_USER_ONLY
3457     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3458     return do_ibranch(ctx, dest, a->l, a->n);
3459 #else
3460     nullify_over(ctx);
3461     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3462 
3463     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3464     if (ctx->iaoq_b == -1) {
3465         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3466     }
3467     copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3468     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3469     if (a->l) {
3470         copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3471     }
3472     nullify_set(ctx, a->n);
3473     tcg_gen_lookup_and_goto_ptr();
3474     ctx->base.is_jmp = DISAS_NORETURN;
3475     return nullify_end(ctx);
3476 #endif
3477 }
3478 
3479 /*
3480  * Float class 0
3481  */
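
/* Most of class 0 reduces to sign-bit manipulation on the raw IEEE
   encoding: fcpy is a plain move, fabs clears the sign bit, fneg
   flips it, and fnegabs sets it, so no helper call or env access
   is needed for these.  */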
3482 
3483 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3484 {
3485     tcg_gen_mov_i32(dst, src);
3486 }
3487 
3488 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3489 {
3490     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3491 }
3492 
3493 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3494 {
3495     tcg_gen_mov_i64(dst, src);
3496 }
3497 
3498 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3499 {
3500     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3501 }
3502 
3503 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3504 {
3505     tcg_gen_andi_i32(dst, src, INT32_MAX);
3506 }
3507 
3508 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3509 {
3510     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3511 }
3512 
3513 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3514 {
3515     tcg_gen_andi_i64(dst, src, INT64_MAX);
3516 }
3517 
3518 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3519 {
3520     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3521 }
3522 
3523 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3524 {
3525     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3526 }
3527 
3528 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3529 {
3530     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3531 }
3532 
3533 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3534 {
3535     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3536 }
3537 
3538 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3539 {
3540     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3541 }
3542 
3543 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3544 {
3545     tcg_gen_xori_i32(dst, src, INT32_MIN);
3546 }
3547 
3548 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3549 {
3550     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3551 }
3552 
3553 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3554 {
3555     tcg_gen_xori_i64(dst, src, INT64_MIN);
3556 }
3557 
3558 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3559 {
3560     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3561 }
3562 
3563 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3564 {
3565     tcg_gen_ori_i32(dst, src, INT32_MIN);
3566 }
3567 
3568 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3569 {
3570     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3571 }
3572 
3573 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3574 {
3575     tcg_gen_ori_i64(dst, src, INT64_MIN);
3576 }
3577 
3578 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3579 {
3580     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3581 }
3582 
3583 /*
3584  * Float class 1
3585  */
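
/* Naming in the conversions below, as read from the helper names:
   w is a 32-bit integer, q (dw in the helpers) a 64-bit integer,
   f/s single and d double precision; a u prefix marks unsigned and
   the _t_ forms convert with truncation toward zero.  */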
3586 
3587 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3588 {
3589     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3590 }
3591 
3592 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3593 {
3594     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3595 }
3596 
3597 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3598 {
3599     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3600 }
3601 
3602 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3603 {
3604     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3605 }
3606 
3607 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3608 {
3609     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3610 }
3611 
3612 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3613 {
3614     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3615 }
3616 
3617 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3618 {
3619     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3620 }
3621 
3622 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3623 {
3624     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3625 }
3626 
3627 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3628 {
3629     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3630 }
3631 
3632 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3633 {
3634     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3635 }
3636 
3637 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3638 {
3639     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3640 }
3641 
3642 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3643 {
3644     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3645 }
3646 
3647 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3648 {
3649     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3650 }
3651 
3652 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3653 {
3654     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3655 }
3656 
3657 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3658 {
3659     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3660 }
3661 
3662 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3663 {
3664     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3665 }
3666 
3667 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3668 {
3669     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3670 }
3671 
3672 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3673 {
3674     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3675 }
3676 
3677 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3678 {
3679     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3680 }
3681 
3682 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3683 {
3684     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3685 }
3686 
3687 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3688 {
3689     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3690 }
3691 
3692 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3693 {
3694     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3695 }
3696 
3697 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3698 {
3699     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3700 }
3701 
3702 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3703 {
3704     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3705 }
3706 
3707 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3708 {
3709     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3710 }
3711 
3712 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3713 {
3714     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3715 }
3716 
3717 /*
3718  * Float class 2
3719  */
3720 
3721 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3722 {
3723     TCGv_i32 ta, tb, tc, ty;
3724 
3725     nullify_over(ctx);
3726 
3727     ta = load_frw0_i32(a->r1);
3728     tb = load_frw0_i32(a->r2);
3729     ty = tcg_const_i32(a->y);
3730     tc = tcg_const_i32(a->c);
3731 
3732     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3733 
3734     tcg_temp_free_i32(ta);
3735     tcg_temp_free_i32(tb);
3736     tcg_temp_free_i32(ty);
3737     tcg_temp_free_i32(tc);
3738 
3739     return nullify_end(ctx);
3740 }
3741 
3742 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3743 {
3744     TCGv_i64 ta, tb;
3745     TCGv_i32 tc, ty;
3746 
3747     nullify_over(ctx);
3748 
3749     ta = load_frd0(a->r1);
3750     tb = load_frd0(a->r2);
3751     ty = tcg_const_i32(a->y);
3752     tc = tcg_const_i32(a->c);
3753 
3754     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3755 
3756     tcg_temp_free_i64(ta);
3757     tcg_temp_free_i64(tb);
3758     tcg_temp_free_i32(ty);
3759     tcg_temp_free_i32(tc);
3760 
3761     return nullify_end(ctx);
3762 }
3763 
3764 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3765 {
3766     TCGv_reg t;
3767 
3768     nullify_over(ctx);
3769 
3770     t = get_temp(ctx);
3771     tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3772 
3773     if (a->y == 1) {
3774         int mask;
3775         bool inv = false;
3776 
3777         switch (a->c) {
3778         case 0: /* simple */
3779             tcg_gen_andi_reg(t, t, 0x4000000);
3780             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3781             goto done;
3782         case 2: /* rej */
3783             inv = true;
3784             /* fallthru */
3785         case 1: /* acc */
3786             mask = 0x43ff800;
3787             break;
3788         case 6: /* rej8 */
3789             inv = true;
3790             /* fallthru */
3791         case 5: /* acc8 */
3792             mask = 0x43f8000;
3793             break;
3794         case 9: /* acc6 */
3795             mask = 0x43e0000;
3796             break;
3797         case 13: /* acc4 */
3798             mask = 0x4380000;
3799             break;
3800         case 17: /* acc2 */
3801             mask = 0x4200000;
3802             break;
3803         default:
3804             gen_illegal(ctx);
3805             return true;
3806         }
3807         if (inv) {
3808             TCGv_reg c = load_const(ctx, mask);
3809             tcg_gen_or_reg(t, t, c);
3810             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3811         } else {
3812             tcg_gen_andi_reg(t, t, mask);
3813             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3814         }
3815     } else {
3816         unsigned cbit = (a->y ^ 1) - 1;
3817 
3818         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3819         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3820         tcg_temp_free(t);
3821     }
3822 
3823  done:
3824     return nullify_end(ctx);
3825 }
3826 
3827 /*
3828  * Float class 3
3829  */
3830 
3831 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3832 {
3833     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3834 }
3835 
3836 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3837 {
3838     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3839 }
3840 
3841 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3842 {
3843     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3844 }
3845 
3846 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3847 {
3848     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3849 }
3850 
3851 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3852 {
3853     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3854 }
3855 
3856 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3857 {
3858     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3859 }
3860 
3861 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3862 {
3863     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3864 }
3865 
3866 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3867 {
3868     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3869 }
3870 
3871 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3872 {
3873     TCGv_i64 x, y;
3874 
3875     nullify_over(ctx);
3876 
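    /* XMPYU: unsigned 32x32 -> 64-bit multiply in the FPU; the i64
       loads are assumed to zero-extend the 32-bit operands, which
       the unsigned product requires.  */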
3877     x = load_frw0_i64(a->r1);
3878     y = load_frw0_i64(a->r2);
3879     tcg_gen_mul_i64(x, x, y);
3880     save_frd(a->t, x);
3881     tcg_temp_free_i64(x);
3882     tcg_temp_free_i64(y);
3883 
3884     return nullify_end(ctx);
3885 }
3886 
3887 /* Convert the fmpyadd single-precision register encodings to standard.  */
3888 static inline int fmpyadd_s_reg(unsigned r)
3889 {
3890     return (r & 16) * 2 + 16 + (r & 15);
3891 }
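
/* For example, fmpyadd_s_reg(5) == 21 and fmpyadd_s_reg(20) == 52:
   encodings 0-15 select registers 16-31 and encodings 16-31 select
   registers 48-63 in the flattened single-precision register space.  */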
3892 
3893 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3894 {
3895     int tm = fmpyadd_s_reg(a->tm);
3896     int ra = fmpyadd_s_reg(a->ra);
3897     int ta = fmpyadd_s_reg(a->ta);
3898     int rm2 = fmpyadd_s_reg(a->rm2);
3899     int rm1 = fmpyadd_s_reg(a->rm1);
3900 
3901     nullify_over(ctx);
3902 
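    /* The two halves are independent: tm = rm1 * rm2 alongside
       ta = ta +/- ra, both under the same nullification state.  */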
3903     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3904     do_fop_weww(ctx, ta, ta, ra,
3905                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3906 
3907     return nullify_end(ctx);
3908 }
3909 
3910 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3911 {
3912     return do_fmpyadd_s(ctx, a, false);
3913 }
3914 
3915 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3916 {
3917     return do_fmpyadd_s(ctx, a, true);
3918 }
3919 
3920 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3921 {
3922     nullify_over(ctx);
3923 
3924     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3925     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3926                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3927 
3928     return nullify_end(ctx);
3929 }
3930 
3931 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3932 {
3933     return do_fmpyadd_d(ctx, a, false);
3934 }
3935 
3936 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
3937 {
3938     return do_fmpyadd_d(ctx, a, true);
3939 }
3940 
3941 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
3942 {
3943     TCGv_i32 x, y, z;
3944 
3945     nullify_over(ctx);
3946     x = load_frw0_i32(a->rm1);
3947     y = load_frw0_i32(a->rm2);
3948     z = load_frw0_i32(a->ra3);
3949 
3950     if (a->neg) {
3951         gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
3952     } else {
3953         gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
3954     }
3955 
3956     tcg_temp_free_i32(y);
3957     tcg_temp_free_i32(z);
3958     save_frw_i32(a->t, x);
3959     tcg_temp_free_i32(x);
3960     return nullify_end(ctx);
3961 }
3962 
3963 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
3964 {
3965     TCGv_i64 x, y, z;
3966 
3967     nullify_over(ctx);
3968     x = load_frd0(a->rm1);
3969     y = load_frd0(a->rm2);
3970     z = load_frd0(a->ra3);
3971 
3972     if (a->neg) {
3973         gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
3974     } else {
3975         gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
3976     }
3977 
3978     tcg_temp_free_i64(y);
3979     tcg_temp_free_i64(z);
3980     save_frd(a->t, x);
3981     tcg_temp_free_i64(x);
3982     return nullify_end(ctx);
3983 }
3984 
3985 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
3986 {
3987     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3988     int bound;
3989 
3990     ctx->cs = cs;
3991     ctx->tb_flags = ctx->base.tb->flags;
3992 
3993 #ifdef CONFIG_USER_ONLY
3994     ctx->privilege = MMU_USER_IDX;
3995     ctx->mmu_idx = MMU_USER_IDX;
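    /* The low two bits of an IAOQ value hold the privilege level;
       user-only code always runs at MMU_USER_IDX, so it is OR'd
       into both queue values.  */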
3996     ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
3997     ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
3998 #else
3999     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4000     ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4001 
4002     /* Recover the IAOQ values from the GVA + PRIV.  */
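    /* cs_base packs IASQ_F in the high 32 bits and the signed
       IAOQ_B - IAOQ_F delta in the low 32 bits; a zero delta means
       the back of the queue was unknown when the TB was built.  */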
4003     uint64_t cs_base = ctx->base.tb->cs_base;
4004     uint64_t iasq_f = cs_base & ~0xffffffffull;
4005     int32_t diff = cs_base;
4006 
4007     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4008     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4009 #endif
4010     ctx->iaoq_n = -1;
4011     ctx->iaoq_n_var = NULL;
4012 
4013     /* Bound the number of instructions by those left on the page.  */
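    /* (pc_first | TARGET_PAGE_MASK) sets every bit above the in-page
       offset, so its negation is the byte count remaining on the page;
       dividing by 4 converts that to an insn count.  */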
4014     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4015     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4016 
4017     ctx->ntempr = 0;
4018     ctx->ntempl = 0;
4019     memset(ctx->tempr, 0, sizeof(ctx->tempr));
4020     memset(ctx->templ, 0, sizeof(ctx->templ));
4021 }
4022 
4023 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4024 {
4025     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4026 
4027     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4028     ctx->null_cond = cond_make_f();
4029     ctx->psw_n_nonzero = false;
4030     if (ctx->tb_flags & PSW_N) {
4031         ctx->null_cond.c = TCG_COND_ALWAYS;
4032         ctx->psw_n_nonzero = true;
4033     }
4034     ctx->null_lab = NULL;
4035 }
4036 
4037 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4038 {
4039     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4040 
4041     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4042 }
4043 
4044 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
4045                                       const CPUBreakpoint *bp)
4046 {
4047     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4048 
4049     gen_excp(ctx, EXCP_DEBUG);
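    /* Advance pc_next so the breakpoint address is covered by
       [tb->pc, tb->pc + tb->size) and the resulting TB is non-empty.  */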
4050     ctx->base.pc_next += 4;
4051     return true;
4052 }
4053 
4054 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4055 {
4056     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4057     CPUHPPAState *env = cs->env_ptr;
4058     DisasJumpType ret;
4059     int i, n;
4060 
4061     /* Execute one insn.  */
4062 #ifdef CONFIG_USER_ONLY
4063     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4064         do_page_zero(ctx);
4065         ret = ctx->base.is_jmp;
4066         assert(ret != DISAS_NEXT);
4067     } else
4068 #endif
4069     {
4070         /* Always fetch the insn, even if nullified, so that we check
4071            the page permissions for execute.  */
4072         uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
4073 
4074         /* Set up the IA queue for the next insn.
4075            This will be overwritten by a branch.  */
4076         if (ctx->iaoq_b == -1) {
4077             ctx->iaoq_n = -1;
4078             ctx->iaoq_n_var = get_temp(ctx);
4079             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4080         } else {
4081             ctx->iaoq_n = ctx->iaoq_b + 4;
4082             ctx->iaoq_n_var = NULL;
4083         }
4084 
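        /* If this insn was nullified by the previous one, skip it:
           the fetch above has already performed the execute-permission
           check, and resetting the condition re-arms the next insn.  */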
4085         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4086             ctx->null_cond.c = TCG_COND_NEVER;
4087             ret = DISAS_NEXT;
4088         } else {
4089             ctx->insn = insn;
4090             if (!decode(ctx, insn)) {
4091                 gen_illegal(ctx);
4092             }
4093             ret = ctx->base.is_jmp;
4094             assert(ctx->null_lab == NULL);
4095         }
4096     }
4097 
4098     /* Free any temporaries allocated.  */
4099     for (i = 0, n = ctx->ntempr; i < n; ++i) {
4100         tcg_temp_free(ctx->tempr[i]);
4101         ctx->tempr[i] = NULL;
4102     }
4103     for (i = 0, n = ctx->ntempl; i < n; ++i) {
4104         tcg_temp_free_tl(ctx->templ[i]);
4105         ctx->templ[i] = NULL;
4106     }
4107     ctx->ntempr = 0;
4108     ctx->ntempl = 0;
4109 
4110     /* Advance the insn queue.  Note that this check also detects
4111        a privilege change within the instruction queue.  */
4112     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4113         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4114             && use_goto_tb(ctx, ctx->iaoq_b)
4115             && (ctx->null_cond.c == TCG_COND_NEVER
4116                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4117             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4118             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4119             ctx->base.is_jmp = ret = DISAS_NORETURN;
4120         } else {
4121             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4122         }
4123     }
4124     ctx->iaoq_f = ctx->iaoq_b;
4125     ctx->iaoq_b = ctx->iaoq_n;
4126     ctx->base.pc_next += 4;
4127 
4128     if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
4129         return;
4130     }
4131     if (ctx->iaoq_f == -1) {
4132         tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4133         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4134 #ifndef CONFIG_USER_ONLY
4135         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4136 #endif
4137         nullify_save(ctx);
4138         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
4139     } else if (ctx->iaoq_b == -1) {
4140         tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4141     }
4142 }
4143 
4144 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4145 {
4146     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4147     DisasJumpType is_jmp = ctx->base.is_jmp;
4148 
4149     switch (is_jmp) {
4150     case DISAS_NORETURN:
4151         break;
4152     case DISAS_TOO_MANY:
4153     case DISAS_IAQ_N_STALE:
4154     case DISAS_IAQ_N_STALE_EXIT:
4155         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4156         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4157         nullify_save(ctx);
4158         /* FALLTHRU */
4159     case DISAS_IAQ_N_UPDATED:
4160         if (ctx->base.singlestep_enabled) {
4161             gen_excp_1(EXCP_DEBUG);
4162         } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
4163             tcg_gen_exit_tb(NULL, 0);
4164         } else {
4165             tcg_gen_lookup_and_goto_ptr();
4166         }
4167         break;
4168     default:
4169         g_assert_not_reached();
4170     }
4171 }
4172 
4173 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4174 {
4175     target_ulong pc = dcbase->pc_first;
4176 
4177 #ifdef CONFIG_USER_ONLY
4178     switch (pc) {
4179     case 0x00:
4180         qemu_log("IN:\n0x00000000:  (null)\n");
4181         return;
4182     case 0xb0:
4183         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4184         return;
4185     case 0xe0:
4186         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4187         return;
4188     case 0x100:
4189         qemu_log("IN:\n0x00000100:  syscall\n");
4190         return;
4191     }
4192 #endif
4193 
4194     qemu_log("IN: %s\n", lookup_symbol(pc));
4195     log_target_disas(cs, pc, dcbase->tb->size);
4196 }
4197 
4198 static const TranslatorOps hppa_tr_ops = {
4199     .init_disas_context = hppa_tr_init_disas_context,
4200     .tb_start           = hppa_tr_tb_start,
4201     .insn_start         = hppa_tr_insn_start,
4202     .breakpoint_check   = hppa_tr_breakpoint_check,
4203     .translate_insn     = hppa_tr_translate_insn,
4204     .tb_stop            = hppa_tr_tb_stop,
4205     .disas_log          = hppa_tr_disas_log,
4206 };
4207 
4208 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4210 {
4211     DisasContext ctx;
4212     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
4213 }
4214 
4215 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4216                           target_ulong *data)
4217 {
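    /* data[] holds the two values recorded per insn by
       hppa_tr_insn_start via tcg_gen_insn_start: IAOQ_F and IAOQ_B.  */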
4218     env->iaoq_f = data[0];
4219     if (data[1] != (target_ureg)-1) {
4220         env->iaoq_b = data[1];
4221     }
4222     /* Since we were executing the instruction at IAOQ_F, and took some
4223        sort of action that provoked the cpu_restore_state, we can infer
4224        that the instruction was not nullified.  */
4225     env->psw_n = 0;
4226 }
4227