xref: /openbmc/qemu/target/hppa/translate.c (revision eab15862)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
32 
33 /* Since we have a distinction between register size and address size,
34    we need to redefine all of these.  */
35 
36 #undef TCGv
37 #undef tcg_temp_new
38 #undef tcg_global_reg_new
39 #undef tcg_global_mem_new
40 #undef tcg_temp_local_new
41 #undef tcg_temp_free
42 
43 #if TARGET_LONG_BITS == 64
44 #define TCGv_tl              TCGv_i64
45 #define tcg_temp_new_tl      tcg_temp_new_i64
46 #define tcg_temp_free_tl     tcg_temp_free_i64
47 #if TARGET_REGISTER_BITS == 64
48 #define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
49 #else
50 #define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
51 #endif
52 #else
53 #define TCGv_tl              TCGv_i32
54 #define tcg_temp_new_tl      tcg_temp_new_i32
55 #define tcg_temp_free_tl     tcg_temp_free_i32
56 #define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
57 #endif
58 
59 #if TARGET_REGISTER_BITS == 64
60 #define TCGv_reg             TCGv_i64
61 
62 #define tcg_temp_new         tcg_temp_new_i64
63 #define tcg_global_reg_new   tcg_global_reg_new_i64
64 #define tcg_global_mem_new   tcg_global_mem_new_i64
65 #define tcg_temp_local_new   tcg_temp_local_new_i64
66 #define tcg_temp_free        tcg_temp_free_i64
67 
68 #define tcg_gen_movi_reg     tcg_gen_movi_i64
69 #define tcg_gen_mov_reg      tcg_gen_mov_i64
70 #define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
71 #define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
72 #define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
73 #define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
74 #define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
75 #define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
76 #define tcg_gen_ld_reg       tcg_gen_ld_i64
77 #define tcg_gen_st8_reg      tcg_gen_st8_i64
78 #define tcg_gen_st16_reg     tcg_gen_st16_i64
79 #define tcg_gen_st32_reg     tcg_gen_st32_i64
80 #define tcg_gen_st_reg       tcg_gen_st_i64
81 #define tcg_gen_add_reg      tcg_gen_add_i64
82 #define tcg_gen_addi_reg     tcg_gen_addi_i64
83 #define tcg_gen_sub_reg      tcg_gen_sub_i64
84 #define tcg_gen_neg_reg      tcg_gen_neg_i64
85 #define tcg_gen_subfi_reg    tcg_gen_subfi_i64
86 #define tcg_gen_subi_reg     tcg_gen_subi_i64
87 #define tcg_gen_and_reg      tcg_gen_and_i64
88 #define tcg_gen_andi_reg     tcg_gen_andi_i64
89 #define tcg_gen_or_reg       tcg_gen_or_i64
90 #define tcg_gen_ori_reg      tcg_gen_ori_i64
91 #define tcg_gen_xor_reg      tcg_gen_xor_i64
92 #define tcg_gen_xori_reg     tcg_gen_xori_i64
93 #define tcg_gen_not_reg      tcg_gen_not_i64
94 #define tcg_gen_shl_reg      tcg_gen_shl_i64
95 #define tcg_gen_shli_reg     tcg_gen_shli_i64
96 #define tcg_gen_shr_reg      tcg_gen_shr_i64
97 #define tcg_gen_shri_reg     tcg_gen_shri_i64
98 #define tcg_gen_sar_reg      tcg_gen_sar_i64
99 #define tcg_gen_sari_reg     tcg_gen_sari_i64
100 #define tcg_gen_brcond_reg   tcg_gen_brcond_i64
101 #define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
102 #define tcg_gen_setcond_reg  tcg_gen_setcond_i64
103 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
104 #define tcg_gen_mul_reg      tcg_gen_mul_i64
105 #define tcg_gen_muli_reg     tcg_gen_muli_i64
106 #define tcg_gen_div_reg      tcg_gen_div_i64
107 #define tcg_gen_rem_reg      tcg_gen_rem_i64
108 #define tcg_gen_divu_reg     tcg_gen_divu_i64
109 #define tcg_gen_remu_reg     tcg_gen_remu_i64
110 #define tcg_gen_discard_reg  tcg_gen_discard_i64
111 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
112 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
113 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
114 #define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
115 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
116 #define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
117 #define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
118 #define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
119 #define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
120 #define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
121 #define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
122 #define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
123 #define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
124 #define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
125 #define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
126 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
127 #define tcg_gen_andc_reg     tcg_gen_andc_i64
128 #define tcg_gen_eqv_reg      tcg_gen_eqv_i64
129 #define tcg_gen_nand_reg     tcg_gen_nand_i64
130 #define tcg_gen_nor_reg      tcg_gen_nor_i64
131 #define tcg_gen_orc_reg      tcg_gen_orc_i64
132 #define tcg_gen_clz_reg      tcg_gen_clz_i64
133 #define tcg_gen_ctz_reg      tcg_gen_ctz_i64
134 #define tcg_gen_clzi_reg     tcg_gen_clzi_i64
135 #define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
136 #define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
137 #define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
138 #define tcg_gen_rotl_reg     tcg_gen_rotl_i64
139 #define tcg_gen_rotli_reg    tcg_gen_rotli_i64
140 #define tcg_gen_rotr_reg     tcg_gen_rotr_i64
141 #define tcg_gen_rotri_reg    tcg_gen_rotri_i64
142 #define tcg_gen_deposit_reg  tcg_gen_deposit_i64
143 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
144 #define tcg_gen_extract_reg  tcg_gen_extract_i64
145 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
146 #define tcg_const_reg        tcg_const_i64
147 #define tcg_const_local_reg  tcg_const_local_i64
148 #define tcg_gen_movcond_reg  tcg_gen_movcond_i64
149 #define tcg_gen_add2_reg     tcg_gen_add2_i64
150 #define tcg_gen_sub2_reg     tcg_gen_sub2_i64
151 #define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
152 #define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
153 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
154 #if UINTPTR_MAX == UINT32_MAX
155 # define tcg_gen_trunc_reg_ptr(p, r) \
156     tcg_gen_trunc_i64_i32(TCGV_PTR_TO_NAT(p), r)
157 #else
158 # define tcg_gen_trunc_reg_ptr(p, r) \
159     tcg_gen_mov_i64(TCGV_PTR_TO_NAT(p), r)
160 #endif
161 #else
162 #define TCGv_reg             TCGv_i32
163 #define tcg_temp_new         tcg_temp_new_i32
164 #define tcg_global_reg_new   tcg_global_reg_new_i32
165 #define tcg_global_mem_new   tcg_global_mem_new_i32
166 #define tcg_temp_local_new   tcg_temp_local_new_i32
167 #define tcg_temp_free        tcg_temp_free_i32
168 
169 #define tcg_gen_movi_reg     tcg_gen_movi_i32
170 #define tcg_gen_mov_reg      tcg_gen_mov_i32
171 #define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
172 #define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
173 #define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
174 #define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
175 #define tcg_gen_ld32u_reg    tcg_gen_ld_i32
176 #define tcg_gen_ld32s_reg    tcg_gen_ld_i32
177 #define tcg_gen_ld_reg       tcg_gen_ld_i32
178 #define tcg_gen_st8_reg      tcg_gen_st8_i32
179 #define tcg_gen_st16_reg     tcg_gen_st16_i32
180 #define tcg_gen_st32_reg     tcg_gen_st32_i32
181 #define tcg_gen_st_reg       tcg_gen_st_i32
182 #define tcg_gen_add_reg      tcg_gen_add_i32
183 #define tcg_gen_addi_reg     tcg_gen_addi_i32
184 #define tcg_gen_sub_reg      tcg_gen_sub_i32
185 #define tcg_gen_neg_reg      tcg_gen_neg_i32
186 #define tcg_gen_subfi_reg    tcg_gen_subfi_i32
187 #define tcg_gen_subi_reg     tcg_gen_subi_i32
188 #define tcg_gen_and_reg      tcg_gen_and_i32
189 #define tcg_gen_andi_reg     tcg_gen_andi_i32
190 #define tcg_gen_or_reg       tcg_gen_or_i32
191 #define tcg_gen_ori_reg      tcg_gen_ori_i32
192 #define tcg_gen_xor_reg      tcg_gen_xor_i32
193 #define tcg_gen_xori_reg     tcg_gen_xori_i32
194 #define tcg_gen_not_reg      tcg_gen_not_i32
195 #define tcg_gen_shl_reg      tcg_gen_shl_i32
196 #define tcg_gen_shli_reg     tcg_gen_shli_i32
197 #define tcg_gen_shr_reg      tcg_gen_shr_i32
198 #define tcg_gen_shri_reg     tcg_gen_shri_i32
199 #define tcg_gen_sar_reg      tcg_gen_sar_i32
200 #define tcg_gen_sari_reg     tcg_gen_sari_i32
201 #define tcg_gen_brcond_reg   tcg_gen_brcond_i32
202 #define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
203 #define tcg_gen_setcond_reg  tcg_gen_setcond_i32
204 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
205 #define tcg_gen_mul_reg      tcg_gen_mul_i32
206 #define tcg_gen_muli_reg     tcg_gen_muli_i32
207 #define tcg_gen_div_reg      tcg_gen_div_i32
208 #define tcg_gen_rem_reg      tcg_gen_rem_i32
209 #define tcg_gen_divu_reg     tcg_gen_divu_i32
210 #define tcg_gen_remu_reg     tcg_gen_remu_i32
211 #define tcg_gen_discard_reg  tcg_gen_discard_i32
212 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
213 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
214 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
215 #define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
216 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
217 #define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
218 #define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
219 #define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
220 #define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
221 #define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
222 #define tcg_gen_ext32u_reg   tcg_gen_mov_i32
223 #define tcg_gen_ext32s_reg   tcg_gen_mov_i32
224 #define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
225 #define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
226 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
227 #define tcg_gen_andc_reg     tcg_gen_andc_i32
228 #define tcg_gen_eqv_reg      tcg_gen_eqv_i32
229 #define tcg_gen_nand_reg     tcg_gen_nand_i32
230 #define tcg_gen_nor_reg      tcg_gen_nor_i32
231 #define tcg_gen_orc_reg      tcg_gen_orc_i32
232 #define tcg_gen_clz_reg      tcg_gen_clz_i32
233 #define tcg_gen_ctz_reg      tcg_gen_ctz_i32
234 #define tcg_gen_clzi_reg     tcg_gen_clzi_i32
235 #define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
236 #define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
237 #define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
238 #define tcg_gen_rotl_reg     tcg_gen_rotl_i32
239 #define tcg_gen_rotli_reg    tcg_gen_rotli_i32
240 #define tcg_gen_rotr_reg     tcg_gen_rotr_i32
241 #define tcg_gen_rotri_reg    tcg_gen_rotri_i32
242 #define tcg_gen_deposit_reg  tcg_gen_deposit_i32
243 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
244 #define tcg_gen_extract_reg  tcg_gen_extract_i32
245 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
246 #define tcg_const_reg        tcg_const_i32
247 #define tcg_const_local_reg  tcg_const_local_i32
248 #define tcg_gen_movcond_reg  tcg_gen_movcond_i32
249 #define tcg_gen_add2_reg     tcg_gen_add2_i32
250 #define tcg_gen_sub2_reg     tcg_gen_sub2_i32
251 #define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
252 #define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
253 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
254 #if UINTPTR_MAX == UINT32_MAX
255 # define tcg_gen_trunc_reg_ptr(p, r) \
256     tcg_gen_mov_i32(TCGV_PTR_TO_NAT(p), r)
257 #else
258 # define tcg_gen_trunc_reg_ptr(p, r) \
259     tcg_gen_extu_i32_i64(TCGV_PTR_TO_NAT(p), r)
260 #endif
261 #endif /* TARGET_REGISTER_BITS */
262 
/* A deferred comparison: condition code C applied to operands A0 and A1.
   The flags record aliasing so cond_free knows what may be freed.  */
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;   /* a0 aliases the cpu_psw_n global; do not free */
    bool a1_is_0;   /* a1 stands for constant 0, not yet materialized */
} DisasCond;
269 
/* Per-translation-block state threaded through the translator.  */
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* Instruction-address offset queue: front (current), back (next),
       and the computed offset following that.  */
    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;    /* variable form of iaoq_n when not constant */

    /* Temps allocated during one insn, freed at its end.  */
    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    /* Condition under which the *next* insn is nullified.  */
    DisasCond null_cond;
    TCGLabel *null_lab;     /* branch target skipping a nullified insn */

    uint32_t insn;          /* raw opcode of the current insn */
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;     /* PSW[N] global may currently hold nonzero */
} DisasContext;
292 
293 /* Target-specific return values from translate_one, indicating the
294    state of the TB.  Note that DISAS_NEXT indicates that we are not
295    exiting the TB.  */
296 
297 /* We are not using a goto_tb (for whatever reason), but have updated
298    the iaq (for whatever reason), so don't do it again on exit.  */
299 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
300 
301 /* We are exiting the TB, but have neither emitted a goto_tb, nor
302    updated the iaq for the next instruction to be executed.  */
303 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
304 
305 /* Similarly, but we want to return to the main loop immediately
306    to recognize unmasked interrupts.  */
307 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
308 
/* One entry of the insn decode table: match when (opcode & mask) == insn,
   then dispatch to TRANS.  The union F carries an optional generator
   callback whose signature is encoded in its name (t = target reg,
   w = i32, d = i64, e = env).  */
typedef struct DisasInsn {
    uint32_t insn, mask;
    DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
                           const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;
323 
324 /* global register indexes */
325 static TCGv_reg cpu_gr[32];
326 static TCGv_i64 cpu_sr[4];
327 static TCGv_i64 cpu_srH;
328 static TCGv_reg cpu_iaoq_f;
329 static TCGv_reg cpu_iaoq_b;
330 static TCGv_i64 cpu_iasq_f;
331 static TCGv_i64 cpu_iasq_b;
332 static TCGv_reg cpu_sar;
333 static TCGv_reg cpu_psw_n;
334 static TCGv_reg cpu_psw_v;
335 static TCGv_reg cpu_psw_cb;
336 static TCGv_reg cpu_psw_cb_msb;
337 
338 #include "exec/gen-icount.h"
339 
/* One-time setup: create the TCG globals that mirror CPUHPPAState fields.
   Called once at CPU realization.  */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* GR0 is hardwired to zero; load_gpr/save_gpr special-case it.  */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    /* Space registers are always 64-bit, regardless of TARGET_REGISTER_BITS. */
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
398 
399 static DisasCond cond_make_f(void)
400 {
401     return (DisasCond){
402         .c = TCG_COND_NEVER,
403         .a0 = NULL,
404         .a1 = NULL,
405     };
406 }
407 
408 static DisasCond cond_make_n(void)
409 {
410     return (DisasCond){
411         .c = TCG_COND_NE,
412         .a0 = cpu_psw_n,
413         .a0_is_n = true,
414         .a1 = NULL,
415         .a1_is_0 = true
416     };
417 }
418 
/* Build the condition "A0 <c> 0".  A0 is copied into a fresh temp so the
   condition survives later writes to the source register.  */
static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);

    return r;
}
429 
/* Build the condition "A0 <c> A1".  Both operands are snapshotted into
   fresh temps; cond_free releases them later.  */
static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}
442 
/* Materialize a symbolic zero operand into a real constant temp, so the
   condition can be fed to tcg_gen_{brcond,setcond,movcond}_reg.  */
static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}
450 
/* Release any temps owned by *cond and reset it to "never".
   Operands aliasing psw_n or representing constant 0 are not freed.  */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        /* Already free; nothing to do.  */
        break;
    }
}
473 
/* Allocate a register-width temp, recorded in ctx->tempr so it can be
   freed en masse at the end of the insn.  Asserts on pool exhaustion.  */
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}
480 
#ifndef CONFIG_USER_ONLY
/* Allocate an address-width temp, recorded in ctx->templ for bulk
   freeing.  Only needed outside user-only mode.  */
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif
489 
/* Return a managed temp holding the constant V.  */
static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}
496 
/* Return a value usable as source for GR[reg].  GR0 reads as zero, so a
   zeroed temp is substituted; other registers return the global itself.  */
static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}
507 
/* Return a destination for GR[reg].  Use a scratch temp when the target
   is GR0 (writes discarded) or when the insn may be nullified, so the
   architectural register is only updated via save_gpr.  */
static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}
516 
/* Store T into DEST, unless the current insn is nullified, in which case
   DEST keeps its old value (done branch-free with a movcond).  */
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        /* dest = null_cond ? dest (unchanged) : t  */
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                           ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}
527 
/* Write T to GR[reg], honoring nullification.  Writes to GR0 are
   discarded, as architected.  */
static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
534 
535 #ifdef HOST_WORDS_BIGENDIAN
536 # define HI_OFS  0
537 # define LO_OFS  4
538 #else
539 # define HI_OFS  4
540 # define LO_OFS  0
541 #endif
542 
/* Load single-precision FR[rt] as an i32.  Registers 0-31 use the high
   word of the 64-bit slot, 32-63 ("R" halves) the low word; HI_OFS/LO_OFS
   account for host endianness.  Caller frees the returned temp.  */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}
551 
552 static TCGv_i32 load_frw0_i32(unsigned rt)
553 {
554     if (rt == 0) {
555         return tcg_const_i32(0);
556     } else {
557         return load_frw_i32(rt);
558     }
559 }
560 
/* Load single-precision FR[rt] zero-extended into an i64; FR0 reads as
   zero.  Word selection matches load_frw_i32.  Caller frees the temp.  */
static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}
573 
/* Store i32 VAL into single-precision FR[rt]; word selection matches
   load_frw_i32.  */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}
580 
581 #undef HI_OFS
582 #undef LO_OFS
583 
/* Load double-precision FR[rt] into a fresh i64 temp; caller frees.  */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}
590 
591 static TCGv_i64 load_frd0(unsigned rt)
592 {
593     if (rt == 0) {
594         return tcg_const_i64(0);
595     } else {
596         return load_frd(rt);
597     }
598 }
599 
/* Store i64 VAL into double-precision FR[rt].  */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
604 
/* Copy space register REG into DEST.  In user-only mode spaces are
   always zero.  SR0-SR3 are TCG globals; SR4-SR7 are either all equal
   (TB_FLAG_SR_SAME => use the srH global) or loaded from env.  */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
619 
620 /* Skip over the implementation of an insn that has been nullified.
621    Use this when the insn is too complex for a conditional move.  */
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.
   Emits a branch to ctx->null_lab that nullify_end later resolves.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                          ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
650 
651 /* Save the current nullification state to PSW[N].  */
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: just make sure PSW[N] is clear.  */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    /* If a0 already *is* psw_n, the value is in place; otherwise
       compute the boolean into the global.  */
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                           ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
668 
669 /* Set a PSW[N] to X.  The intention is that this is used immediately
670    before a goto_tb/exit_tb, so that there is no fallthru path to other
671    code within the TB.  Therefore we do not update psw_n_nonzero.  */
/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    /* Skip the store when PSW[N] is known-zero and X is false.  */
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}
678 
679 /* Mark the end of an instruction that may have been nullified.
680    This is the pair to nullify_over.  */
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Resolves the skip label and
   returns the (possibly adjusted) disas status for the insn.  */
static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
{
    TCGLabel *null_lab = ctx->null_lab;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return status;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    /* The nullified path falls through, so NORETURN no longer holds.  */
    if (status == DISAS_NORETURN) {
        status = DISAS_NEXT;
    }
    return status;
}
715 
/* Write an IA queue entry: the constant IVAL, or the variable VVAL when
   IVAL is the sentinel -1 ("not a compile-time constant").  */
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}
724 
/* Branch target for displacement DISP: relative to the current insn
   plus 8 (i.e. past the delay slot).  */
static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}
729 
/* Emit a raw call to the exception helper with code EXCEPTION.  */
static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}
736 
/* Raise EXCEPTION after committing the IA queue and nullification state,
   so the exception handler sees a consistent CPU state.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return DISAS_NORETURN;
}
745 
/* Raise EXC, first latching the faulting opcode into CR[IIR] as the
   architecture requires for instruction-related traps.  */
static DisasJumpType gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    return gen_excp(ctx, exc);
}
753 
/* Raise an illegal-instruction trap, respecting nullification.  */
static DisasJumpType gen_illegal(DisasContext *ctx)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp_iir(ctx, EXCP_ILL));
}
759 
/* Return from the translate function with EXCP unless running at the
   most privileged level (0).  Respects nullification.  */
#define CHECK_MOST_PRIVILEGED(EXCP)                               \
    do {                                                          \
        if (ctx->privilege != 0) {                                \
            nullify_over(ctx);                                    \
            return nullify_end(ctx, gen_excp_iir(ctx, EXCP));     \
        }                                                         \
    } while (0)
767 
/* Whether a direct goto_tb to DEST is permissible.  */
static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-steping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}
776 
777 /* If the next insn is to be nullified, and it's on the same page,
778    and we're not attempting to set a breakpoint on it, then we can
779    totally skip the nullified insn.  This avoids creating and
780    executing a TB that merely branches to the next TB.  */
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
786 
/* Transfer control to IA queue (F, B) using chain slot WHICH.  Use a
   direct goto_tb when both addresses are compile-time constants and
   chaining is allowed; otherwise commit the queue and do an indirect
   lookup (or raise a debug exception when single-stepping).  */
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb((uintptr_t)ctx->base.tb + which);
    } else {
        /* -1 entries fall back to the tracked variable values.  */
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}
805 
806 /* PA has a habit of taking the LSB of a field and using that as the sign,
807    with the rest of the field becoming the least significant bits.  */
808 static target_sreg low_sextract(uint32_t val, int pos, int len)
809 {
810     target_ureg x = -(target_ureg)extract32(val, pos, 1);
811     x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
812     return x;
813 }
814 
/* Assemble the 6-bit rt operand of a PA2.0 FP insn:
   high bit at insn bit 6, low 5 bits at bit 0.  */
static unsigned assemble_rt64(uint32_t insn)
{
    return (extract32(insn, 6, 1) << 5) | extract32(insn, 0, 5);
}
821 
/* Assemble the 6-bit ra operand: high bit at insn bit 7,
   low 5 bits at bit 21.  */
static unsigned assemble_ra64(uint32_t insn)
{
    return (extract32(insn, 7, 1) << 5) | extract32(insn, 21, 5);
}
828 
/* Assemble the 6-bit rb operand: high bit at insn bit 12,
   low 5 bits at bit 16.  */
static unsigned assemble_rb64(uint32_t insn)
{
    return (extract32(insn, 12, 1) << 5) | extract32(insn, 16, 5);
}
835 
/* Assemble the 6-bit rc operand from three scattered fields:
   bit 5 at insn bit 8, bits 4:2 at bit 13, bits 1:0 at bit 9.  */
static unsigned assemble_rc64(uint32_t insn)
{
    return (extract32(insn, 8, 1) << 5)
         | (extract32(insn, 13, 3) << 2)
         | extract32(insn, 9, 2);
}
843 
/* Assemble the 3-bit space register number: high bit at insn bit 13,
   low 2 bits at bit 14.  */
static unsigned assemble_sr3(uint32_t insn)
{
    return (extract32(insn, 13, 1) << 2) | extract32(insn, 14, 2);
}
850 
851 static target_sreg assemble_12(uint32_t insn)
852 {
853     target_ureg x = -(target_ureg)(insn & 1);
854     x = (x <<  1) | extract32(insn, 2, 1);
855     x = (x << 10) | extract32(insn, 3, 10);
856     return x;
857 }
858 
859 static target_sreg assemble_16(uint32_t insn)
860 {
861     /* Take the name from PA2.0, which produces a 16-bit number
862        only with wide mode; otherwise a 14-bit number.  Since we don't
863        implement wide mode, this is always the 14-bit number.  */
864     return low_sextract(insn, 0, 14);
865 }
866 
867 static target_sreg assemble_16a(uint32_t insn)
868 {
869     /* Take the name from PA2.0, which produces a 14-bit shifted number
870        only with wide mode; otherwise a 12-bit shifted number.  Since we
871        don't implement wide mode, this is always the 12-bit number.  */
872     target_ureg x = -(target_ureg)(insn & 1);
873     x = (x << 11) | extract32(insn, 2, 11);
874     return x << 2;
875 }
876 
877 static target_sreg assemble_17(uint32_t insn)
878 {
879     target_ureg x = -(target_ureg)(insn & 1);
880     x = (x <<  5) | extract32(insn, 16, 5);
881     x = (x <<  1) | extract32(insn, 2, 1);
882     x = (x << 10) | extract32(insn, 3, 10);
883     return x << 2;
884 }
885 
886 static target_sreg assemble_21(uint32_t insn)
887 {
888     target_ureg x = -(target_ureg)(insn & 1);
889     x = (x << 11) | extract32(insn, 1, 11);
890     x = (x <<  2) | extract32(insn, 14, 2);
891     x = (x <<  5) | extract32(insn, 16, 5);
892     x = (x <<  2) | extract32(insn, 12, 2);
893     return x << 11;
894 }
895 
896 static target_sreg assemble_22(uint32_t insn)
897 {
898     target_ureg x = -(target_ureg)(insn & 1);
899     x = (x << 10) | extract32(insn, 16, 10);
900     x = (x <<  1) | extract32(insn, 2, 1);
901     x = (x << 10) | extract32(insn, 3, 10);
902     return x << 2;
903 }
904 
905 /* The parisc documentation describes only the general interpretation of
906    the conditions, without describing their exact implementation.  The
907    interpretations do not stand up well when considering ADD,C and SUB,B.
908    However, considering the Addition, Subtraction and Logical conditions
909    as a whole it would appear that these relations are similar to what
910    a traditional NZCV set of flags would produce.  */
911 
/* Build the DisasCond selected by the 4-bit condition field CF for an
   arithmetic result.  RES is the result value; CB_MSB (read only for
   conditions 4 and 5) is the carry-out of the most significant bit;
   SV (read only for condition 6) carries the signed-overflow bit in
   its sign position.  Bit 0 of CF selects the inverted sense.  */
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        /* tmp = res & -C, which is zero iff carry clear or res zero.  */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        /* Odd/even tests only the low bit of the result.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF requests the negated condition.  */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
959 
960 /* Similar, but for the special case of subtraction without borrow, we
961    can use the inputs directly.  This can allow other computation to be
962    deleted as unused.  */
963 
964 static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
965                              TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
966 {
967     DisasCond cond;
968 
969     switch (cf >> 1) {
970     case 1: /* = / <> */
971         cond = cond_make(TCG_COND_EQ, in1, in2);
972         break;
973     case 2: /* < / >= */
974         cond = cond_make(TCG_COND_LT, in1, in2);
975         break;
976     case 3: /* <= / > */
977         cond = cond_make(TCG_COND_LE, in1, in2);
978         break;
979     case 4: /* << / >>= */
980         cond = cond_make(TCG_COND_LTU, in1, in2);
981         break;
982     case 5: /* <<= / >> */
983         cond = cond_make(TCG_COND_LEU, in1, in2);
984         break;
985     default:
986         return do_cond(cf, res, sv, sv);
987     }
988     if (cf & 1) {
989         cond.c = tcg_invert_cond(cond.c);
990     }
991 
992     return cond;
993 }
994 
995 /* Similar, but for logicals, where the carry and overflow bits are not
996    computed, and use of them is undefined.  */
997 
998 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
999 {
1000     switch (cf >> 1) {
1001     case 4: case 5: case 6:
1002         cf &= 1;
1003         break;
1004     }
1005     return do_cond(cf, res, res, res);
1006 }
1007 
1008 /* Similar, but for shift/extract/deposit conditions.  */
1009 
1010 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
1011 {
1012     unsigned c, f;
1013 
1014     /* Convert the compressed condition codes to standard.
1015        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1016        4-7 are the reverse of 0-3.  */
1017     c = orig & 3;
1018     if (c == 3) {
1019         c = 7;
1020     }
1021     f = (orig & 4) / 4;
1022 
1023     return do_log_cond(c * 2 + f, res);
1024 }
1025 
1026 /* Similar, but for unit conditions.  */
1027 
/* Build the unit condition CF over RES; IN1/IN2 are the original
   operands, needed to reconstruct the per-bit carry-outs when CF
   requests a digit/byte/halfword carry test (bit 3 set).
   NOTE(review): the replicated masks (0x01010101 etc.) cover only the
   low 32 bits; if target_ureg is 64 bits only the low word is tested
   -- confirm whether that is intended for the 64-bit target.  */
static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        /* Same trick with halfword granularity.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        /* Carry out of any 4-bit (BCD digit) position.  */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        /* Carry out of any byte position.  */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        /* Carry out of any halfword position.  */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
1103 
1104 /* Compute signed overflow for addition.  */
1105 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1106                           TCGv_reg in1, TCGv_reg in2)
1107 {
1108     TCGv_reg sv = get_temp(ctx);
1109     TCGv_reg tmp = tcg_temp_new();
1110 
1111     tcg_gen_xor_reg(sv, res, in1);
1112     tcg_gen_xor_reg(tmp, in1, in2);
1113     tcg_gen_andc_reg(sv, sv, tmp);
1114     tcg_temp_free(tmp);
1115 
1116     return sv;
1117 }
1118 
1119 /* Compute signed overflow for subtraction.  */
1120 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1121                           TCGv_reg in1, TCGv_reg in2)
1122 {
1123     TCGv_reg sv = get_temp(ctx);
1124     TCGv_reg tmp = tcg_temp_new();
1125 
1126     tcg_gen_xor_reg(sv, res, in1);
1127     tcg_gen_xor_reg(tmp, in1, in2);
1128     tcg_gen_and_reg(sv, sv, tmp);
1129     tcg_temp_free(tmp);
1130 
1131     return sv;
1132 }
1133 
/* Add IN1 (pre-shifted left by SHIFT, if nonzero) and IN2, writing the
   result to GR[RT] and installing the nullification condition CF.
     is_l:   "logical" add -- skip carry-bit writeback.
     is_tsv: trap on signed overflow.
     is_tc:  trap when the condition CF is satisfied.
     is_c:   also add the saved PSW carry bit.  */
static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, unsigned shift, bool is_l,
                            bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    /* Conditions 4 and 5 consult the carry-out, so compute it even
       for the "logical" form.  */
    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            /* Recover the per-bit carries: cb = in1 ^ in2 ^ dest.  */
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
1205 
/* Subtract IN2 from IN1, writing the result to GR[RT] and installing
   the nullification condition CF.
     is_tsv: trap on signed overflow.
     is_b:   subtract with borrow -- include the saved PSW carry.
     is_tc:  trap when the condition CF is satisfied.  */
static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, bool is_tsv, bool is_b,
                            bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        /* Recover the per-bit carries: cb = ~in2 ^ in1 ^ dest.  */
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        /* Per-bit carries: cb = ~(in1 ^ in2) ^ dest.  */
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
1272 
/* Compare-and-clear: form the condition CF from IN1 - IN2, then write
   zero to GR[RT]; the difference itself is never stored.  */
static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                               TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
1301 
/* Apply the logical operation FN to IN1/IN2, write the result to
   GR[RT], and install the logical condition CF (0 = no condition).  */
static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, unsigned cf,
                            void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
    return DISAS_NEXT;
}
1319 
/* Apply the unit operation FN to IN1/IN2, write GR[RT], and install
   the unit condition CF.  When IS_TC, trap if the condition holds.
   With CF == 0 no condition is computed and the result may be written
   directly to the register file.  */
static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                             TCGv_reg in2, unsigned cf, bool is_tc,
                             void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        /* The condition needs the result before writeback, so compute
           into a temporary.  */
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        /* Emit any conditional trap before any writeback.  */
        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
    return DISAS_NEXT;
}
1352 
1353 #ifndef CONFIG_USER_ONLY
1354 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1355    from the top 2 bits of the base register.  There are a few system
1356    instructions that have a 3-bit space specifier, for which SR0 is
1357    not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        /* Explicit space register; SP < 0 is the ~SP encoding for the
           3-bit system-instruction specifiers (see comment above).  */
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    /* SR_SAME set: the selectable space registers hold the same value,
       so any of them (here srH) will do.  */
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    /* Extract the top two bits of BASE, left in place scaled by 8
       (the octal 030 mask == top-2-bits << 3), giving a byte offset
       into the i64 sr[] array: selects sr[4 + top2].  */
    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
1391 #endif
1392 
/* Form the effective address for a memory access.
   *POFS receives the register-sized offset (used for base-register
   writeback); *PGVA the full, space-qualified guest virtual address.
   The offset is GR[RB] plus either GR[RX] << SCALE (when RX != 0) or
   DISP.  MODIFY <= 0 addresses through the updated offset
   (pre-modify / no modify); MODIFY > 0 through the original base
   (post-modify).  IS_PHYS suppresses space-register selection.  */
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        /* With PSW.W set, mask the offset down to 62 bits.  */
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        /* Merge in the space register to form the full address.  */
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
1427 
1428 /* Emit a memory load.  The modify parameter should be
1429  * < 0 for pre-modify,
1430  * > 0 for post-modify,
1431  * = 0 for no base register update.
1432  */
1433 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1434                        unsigned rx, int scale, target_sreg disp,
1435                        unsigned sp, int modify, TCGMemOp mop)
1436 {
1437     TCGv_reg ofs;
1438     TCGv_tl addr;
1439 
1440     /* Caller uses nullify_over/nullify_end.  */
1441     assert(ctx->null_cond.c == TCG_COND_NEVER);
1442 
1443     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1444              ctx->mmu_idx == MMU_PHYS_IDX);
1445     tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
1446     if (modify) {
1447         save_gpr(ctx, rb, ofs);
1448     }
1449 }
1450 
1451 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1452                        unsigned rx, int scale, target_sreg disp,
1453                        unsigned sp, int modify, TCGMemOp mop)
1454 {
1455     TCGv_reg ofs;
1456     TCGv_tl addr;
1457 
1458     /* Caller uses nullify_over/nullify_end.  */
1459     assert(ctx->null_cond.c == TCG_COND_NEVER);
1460 
1461     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1462              ctx->mmu_idx == MMU_PHYS_IDX);
1463     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
1464     if (modify) {
1465         save_gpr(ctx, rb, ofs);
1466     }
1467 }
1468 
1469 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1470                         unsigned rx, int scale, target_sreg disp,
1471                         unsigned sp, int modify, TCGMemOp mop)
1472 {
1473     TCGv_reg ofs;
1474     TCGv_tl addr;
1475 
1476     /* Caller uses nullify_over/nullify_end.  */
1477     assert(ctx->null_cond.c == TCG_COND_NEVER);
1478 
1479     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1480              ctx->mmu_idx == MMU_PHYS_IDX);
1481     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
1482     if (modify) {
1483         save_gpr(ctx, rb, ofs);
1484     }
1485 }
1486 
1487 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1488                         unsigned rx, int scale, target_sreg disp,
1489                         unsigned sp, int modify, TCGMemOp mop)
1490 {
1491     TCGv_reg ofs;
1492     TCGv_tl addr;
1493 
1494     /* Caller uses nullify_over/nullify_end.  */
1495     assert(ctx->null_cond.c == TCG_COND_NEVER);
1496 
1497     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1498              ctx->mmu_idx == MMU_PHYS_IDX);
1499     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
1500     if (modify) {
1501         save_gpr(ctx, rb, ofs);
1502     }
1503 }
1504 
1505 #if TARGET_REGISTER_BITS == 64
1506 #define do_load_reg   do_load_64
1507 #define do_store_reg  do_store_64
1508 #else
1509 #define do_load_reg   do_load_32
1510 #define do_store_reg  do_store_32
1511 #endif
1512 
1513 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1514                              unsigned rx, int scale, target_sreg disp,
1515                              unsigned sp, int modify, TCGMemOp mop)
1516 {
1517     TCGv_reg dest;
1518 
1519     nullify_over(ctx);
1520 
1521     if (modify == 0) {
1522         /* No base register update.  */
1523         dest = dest_gpr(ctx, rt);
1524     } else {
1525         /* Make sure if RT == RB, we see the result of the load.  */
1526         dest = get_temp(ctx);
1527     }
1528     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1529     save_gpr(ctx, rt, dest);
1530 
1531     return nullify_end(ctx, DISAS_NEXT);
1532 }
1533 
1534 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1535                                unsigned rx, int scale, target_sreg disp,
1536                                unsigned sp, int modify)
1537 {
1538     TCGv_i32 tmp;
1539 
1540     nullify_over(ctx);
1541 
1542     tmp = tcg_temp_new_i32();
1543     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1544     save_frw_i32(rt, tmp);
1545     tcg_temp_free_i32(tmp);
1546 
1547     if (rt == 0) {
1548         gen_helper_loaded_fr0(cpu_env);
1549     }
1550 
1551     return nullify_end(ctx, DISAS_NEXT);
1552 }
1553 
1554 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1555                                unsigned rx, int scale, target_sreg disp,
1556                                unsigned sp, int modify)
1557 {
1558     TCGv_i64 tmp;
1559 
1560     nullify_over(ctx);
1561 
1562     tmp = tcg_temp_new_i64();
1563     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1564     save_frd(rt, tmp);
1565     tcg_temp_free_i64(tmp);
1566 
1567     if (rt == 0) {
1568         gen_helper_loaded_fr0(cpu_env);
1569     }
1570 
1571     return nullify_end(ctx, DISAS_NEXT);
1572 }
1573 
1574 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1575                               target_sreg disp, unsigned sp,
1576                               int modify, TCGMemOp mop)
1577 {
1578     nullify_over(ctx);
1579     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1580     return nullify_end(ctx, DISAS_NEXT);
1581 }
1582 
1583 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1584                                 unsigned rx, int scale, target_sreg disp,
1585                                 unsigned sp, int modify)
1586 {
1587     TCGv_i32 tmp;
1588 
1589     nullify_over(ctx);
1590 
1591     tmp = load_frw_i32(rt);
1592     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1593     tcg_temp_free_i32(tmp);
1594 
1595     return nullify_end(ctx, DISAS_NEXT);
1596 }
1597 
1598 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1599                                 unsigned rx, int scale, target_sreg disp,
1600                                 unsigned sp, int modify)
1601 {
1602     TCGv_i64 tmp;
1603 
1604     nullify_over(ctx);
1605 
1606     tmp = load_frd(rt);
1607     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1608     tcg_temp_free_i64(tmp);
1609 
1610     return nullify_end(ctx, DISAS_NEXT);
1611 }
1612 
1613 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1614                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1615 {
1616     TCGv_i32 tmp;
1617 
1618     nullify_over(ctx);
1619     tmp = load_frw0_i32(ra);
1620 
1621     func(tmp, cpu_env, tmp);
1622 
1623     save_frw_i32(rt, tmp);
1624     tcg_temp_free_i32(tmp);
1625     return nullify_end(ctx, DISAS_NEXT);
1626 }
1627 
1628 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1629                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1630 {
1631     TCGv_i32 dst;
1632     TCGv_i64 src;
1633 
1634     nullify_over(ctx);
1635     src = load_frd(ra);
1636     dst = tcg_temp_new_i32();
1637 
1638     func(dst, cpu_env, src);
1639 
1640     tcg_temp_free_i64(src);
1641     save_frw_i32(rt, dst);
1642     tcg_temp_free_i32(dst);
1643     return nullify_end(ctx, DISAS_NEXT);
1644 }
1645 
1646 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1647                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1648 {
1649     TCGv_i64 tmp;
1650 
1651     nullify_over(ctx);
1652     tmp = load_frd0(ra);
1653 
1654     func(tmp, cpu_env, tmp);
1655 
1656     save_frd(rt, tmp);
1657     tcg_temp_free_i64(tmp);
1658     return nullify_end(ctx, DISAS_NEXT);
1659 }
1660 
1661 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1662                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1663 {
1664     TCGv_i32 src;
1665     TCGv_i64 dst;
1666 
1667     nullify_over(ctx);
1668     src = load_frw0_i32(ra);
1669     dst = tcg_temp_new_i64();
1670 
1671     func(dst, cpu_env, src);
1672 
1673     tcg_temp_free_i32(src);
1674     save_frd(rt, dst);
1675     tcg_temp_free_i64(dst);
1676     return nullify_end(ctx, DISAS_NEXT);
1677 }
1678 
1679 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1680                                  unsigned ra, unsigned rb,
1681                                  void (*func)(TCGv_i32, TCGv_env,
1682                                               TCGv_i32, TCGv_i32))
1683 {
1684     TCGv_i32 a, b;
1685 
1686     nullify_over(ctx);
1687     a = load_frw0_i32(ra);
1688     b = load_frw0_i32(rb);
1689 
1690     func(a, cpu_env, a, b);
1691 
1692     tcg_temp_free_i32(b);
1693     save_frw_i32(rt, a);
1694     tcg_temp_free_i32(a);
1695     return nullify_end(ctx, DISAS_NEXT);
1696 }
1697 
1698 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1699                                  unsigned ra, unsigned rb,
1700                                  void (*func)(TCGv_i64, TCGv_env,
1701                                               TCGv_i64, TCGv_i64))
1702 {
1703     TCGv_i64 a, b;
1704 
1705     nullify_over(ctx);
1706     a = load_frd0(ra);
1707     b = load_frd0(rb);
1708 
1709     func(a, cpu_env, a, b);
1710 
1711     tcg_temp_free_i64(b);
1712     save_frd(rt, a);
1713     tcg_temp_free_i64(a);
1714     return nullify_end(ctx, DISAS_NEXT);
1715 }
1716 
1717 /* Emit an unconditional branch to a direct target, which may or may not
1718    have already had nullification handled.  */
/* DEST is the branch target, LINK the link register number (0 for no
   link), IS_N the nullify-next bit of the branch.  */
static DisasJumpType do_dbranch(DisasContext *ctx, target_ureg dest,
                                unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No nullification pending: simply retarget the next insn
           queue and continue translating.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        return DISAS_NEXT;
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx, DISAS_NEXT);

        /* The branch itself was nullified: fall through.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        return DISAS_NORETURN;
    }
}
1753 
1754 /* Emit a conditional branch to a direct target.  If the branch itself
1755    is nullified, we should have already used nullify_over.  */
/* DISP is the branch displacement, IS_N the nullify-next bit, COND the
   branch condition (consumed here).  */
static DisasJumpType do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                                DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        return DISAS_IAQ_N_STALE;
    } else {
        return DISAS_NORETURN;
    }
}
1819 
1820 /* Emit an unconditional branch to an indirect target.  This handles
1821    nullification of the branch itself.  */
static DisasJumpType do_ibranch(DisasContext *ctx, TCGv_reg dest,
                                unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    /* No conditional-branch fixup label may be pending here.  */
    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* The branch itself is not nullified: take it unconditionally.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the nullified delay slot entirely: advance both
                   queue entries past it and leave PSW[N] clear.  */
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                return DISAS_IAQ_N_UPDATED;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        /* IAOQ_Next is no longer a compile-time constant.  */
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx, DISAS_NEXT);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        /* Select between the fall-through address and the branch target
           according to the nullification condition.  */
        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* Only update the link register when the branch is taken.  */
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return DISAS_NEXT;
}
1902 
1903 /* Implement
1904  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1905  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1906  *    else
1907  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1908  * which keeps the privilege level from being increased.
1909  */
1910 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1911 {
1912     TCGv_reg dest;
1913     switch (ctx->privilege) {
1914     case 0:
1915         /* Privilege 0 is maximum and is allowed to decrease.  */
1916         return offset;
1917     case 3:
1918         /* Privilege 3 is minimum and is never allowed increase.  */
1919         dest = get_temp(ctx);
1920         tcg_gen_ori_reg(dest, offset, 3);
1921         break;
1922     default:
1923         dest = tcg_temp_new();
1924         tcg_gen_andi_reg(dest, offset, -4);
1925         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1926         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1927         tcg_temp_free(dest);
1928         break;
1929     }
1930     return dest;
1931 }
1932 
1933 #ifdef CONFIG_USER_ONLY
1934 /* On Linux, page zero is normally marked execute only + gateway.
1935    Therefore normal read or write is supposed to fail, but specific
1936    offsets have kernel code mapped to raise permissions to implement
1937    system calls.  Handling this via an explicit check here, rather
1938    in than the "be disp(sr2,r0)" instruction that probably sent us
1939    here, is the easiest way to handle the branch delay slot on the
1940    aforementioned BE.  */
static DisasJumpType do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the (word-aligned) page-zero entry point.  */
    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        return DISAS_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return DISAS_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        /* Store GR26 into CR27 and return to the address in GR31,
           forced to the lowest privilege level.  */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        return DISAS_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return DISAS_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        return DISAS_NORETURN;
    }
}
1991 #endif
1992 
static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* No operation; just resolve any pending nullification condition.  */
    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
1999 
static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    /* BREAK: raise EXCP_BREAK, unless the insn itself is nullified.  */
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp_iir(ctx, EXCP_BREAK));
}
2006 
static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* SYNC/SYNCDMA: emit a full sequentially-consistent barrier.  */
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2016 
static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* MFIA: copy the front of the instruction address queue to GR[rt].
       IAOQ_F is a compile-time constant within this TB.  */
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2028 
static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* MFSP: move space register SR[rs] to GR[rt].  The space value is
       kept in the high 32 bits of the 64-bit backing store.  */
    unsigned rt = extract32(insn, 0, 5);
    unsigned rs = assemble_sr3(insn);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free(t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2048 
static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    /* MFCTL: move control register CR[ctl] to GR[rt].  SAR, the interval
       timer, and CR26/CR27 are readable at any privilege level; all other
       control registers require maximum privilege.  */
    unsigned rt = extract32(insn, 0, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv_reg tmp;
    DisasJumpType ret;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (extract32(insn, 14, 1) == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
            /* Reading the timer is an I/O operation under icount; end
               the TB afterward so the icount budget is re-examined.  */
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            gen_io_end();
            ret = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
            ret = DISAS_NEXT;
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx, ret);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    /* Generic path: load the register image from env.  */
    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2102 
static DisasJumpType trans_mtsp(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* MTSP: move GR[rr] into space register SR[rs].  Writing SR5-SR7
       is privileged.  The space value lives in the high 32 bits of the
       64-bit backing store.  */
    unsigned rr = extract32(insn, 16, 5);
    unsigned rs = assemble_sr3(insn);
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        /* SR4-SR7 are stored in env only; changing them invalidates the
           "all spaces equal" fast-path flag.  */
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx, DISAS_NEXT);
}
2129 
static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    /* MTCTL: move GR[rin] into control register CR[ctl].  Only SAR is
       writable without privilege.  */
    unsigned rin = extract32(insn, 16, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv_reg reg = load_gpr(ctx, rin);
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        /* Mask the shift amount to the register width.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return DISAS_NEXT;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    DisasJumpType ret = DISAS_NEXT;

    nullify_over(ctx);
    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        /* Changing the interrupt mask may unmask a pending interrupt;
           exit the TB so it can be recognized.  */
        gen_helper_write_eiem(cpu_env, reg);
        ret = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx, ret);
#endif
}
2188 
static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    /* MTSARCM: write the ones-complement of GR[rin], masked to the
       register width, into SAR.  */
    unsigned rin = extract32(insn, 16, 5);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, rin));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2203 
static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    /* LDSID: load the space identifier selected by sp/GR[rb] into
       GR[rt].  */
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg dest = dest_gpr(ctx, rt);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    unsigned rb = extract32(insn, 21, 5);
    unsigned sp = extract32(insn, 14, 2);
    TCGv_i64 t0 = tcg_temp_new_i64();

    /* The space value is kept in the high 32 bits of the backing store.  */
    tcg_gen_mov_i64(t0, space_select(ctx, sp, load_gpr(ctx, rb)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2229 
2230 #ifndef CONFIG_USER_ONLY
2231 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
2232 static target_ureg extract_sm_imm(uint32_t insn)
2233 {
2234     target_ureg val = extract32(insn, 16, 10);
2235 
2236     if (val & PSW_SM_E) {
2237         val = (val & ~PSW_SM_E) | PSW_E;
2238     }
2239     if (val & PSW_SM_W) {
2240         val = (val & ~PSW_SM_W) | PSW_W;
2241     }
2242     return val;
2243 }
2244 
static DisasJumpType trans_rsm(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* RSM: clear the given system-mask bits in the PSW, returning the
       old mask in GR[rt].  Privileged.  */
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
}
2264 
static DisasJumpType trans_ssm(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* SSM: set the given system-mask bits in the PSW, returning the
       old mask in GR[rt].  Privileged.  */
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
}
2284 
static DisasJumpType trans_mtsm(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* MTSM: replace the PSW system mask with GR[rr].  The old mask
       returned by the helper is discarded.  Privileged.  */
    unsigned rr = extract32(insn, 16, 5);
    TCGv_reg tmp, reg;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    reg = load_gpr(ctx, rr);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
}
2301 
static DisasJumpType trans_rfi(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* RFI / RFI,R: return from interruption, restoring state from the
       interruption registers.  Privileged.  */
    unsigned comp = extract32(insn, 5, 4);

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    if (comp == 5) {
        /* RFI,R additionally restores the shadow registers.  */
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    if (ctx->base.singlestep_enabled) {
        gen_excp_1(EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(0);
    }

    /* Exit the TB to recognize new interrupts.  */
    return nullify_end(ctx, DISAS_NORETURN);
}
2324 
2325 static DisasJumpType gen_hlt(DisasContext *ctx, int reset)
2326 {
2327     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2328     nullify_over(ctx);
2329     if (reset) {
2330         gen_helper_reset(cpu_env);
2331     } else {
2332         gen_helper_halt(cpu_env);
2333     }
2334     return nullify_end(ctx, DISAS_NORETURN);
2335 }
2336 #endif /* !CONFIG_USER_ONLY */
2337 
/* Decode table for the system-control opcode group: opcode bits, decode
   mask, and translate handler for each insn.  */
static const DisasInsn table_system[] = {
    { 0x00000000u, 0xfc001fe0u, trans_break },
    { 0x00001820u, 0xffe01fffu, trans_mtsp },
    { 0x00001840u, 0xfc00ffffu, trans_mtctl },
    { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
    { 0x000014a0u, 0xffffffe0u, trans_mfia },
    { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
    { 0x000008a0u, 0xfc1fbfe0u, trans_mfctl },
    { 0x00000400u, 0xffffffffu, trans_sync },  /* sync */
    { 0x00100400u, 0xffffffffu, trans_sync },  /* syncdma */
    { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
#ifndef CONFIG_USER_ONLY
    { 0x00000e60u, 0xfc00ffe0u, trans_rsm },
    { 0x00000d60u, 0xfc00ffe0u, trans_ssm },
    { 0x00001860u, 0xffe0ffffu, trans_mtsm },
    { 0x00000c00u, 0xfffffe1fu, trans_rfi },
#endif
};
2356 
static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
                                        const DisasInsn *di)
{
    /* Cache-flush insns with base modification: the flush itself is a
       no-op for us, but GR[rb] += GR[rx] must still happen.  */
    unsigned rb = extract32(insn, 21, 5);
    unsigned rx = extract32(insn, 16, 5);
    TCGv_reg dest = dest_gpr(ctx, rb);
    TCGv_reg src1 = load_gpr(ctx, rb);
    TCGv_reg src2 = load_gpr(ctx, rx);

    /* The only thing we need to do is the base register modification.  */
    tcg_gen_add_reg(dest, src1, src2);
    save_gpr(ctx, rb, dest);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2373 
static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    /* PROBE / PROBEI: test read or write access to the given address at
       a privilege level taken either from an immediate or from GR[rr],
       setting GR[rt] to the result.  */
    unsigned rt = extract32(insn, 0, 5);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rr = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_write = extract32(insn, 6, 1);
    unsigned is_imm = extract32(insn, 13, 1);
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, rt);
    form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);

    if (is_imm) {
        level = tcg_const_i32(extract32(insn, 16, 2));
    } else {
        /* Privilege level comes from the low two bits of GR[rr].  */
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, cpu_env, addr, level, want);

    tcg_temp_free_i32(want);
    tcg_temp_free_i32(level);

    save_gpr(ctx, rt, dest);
    return nullify_end(ctx, DISAS_NEXT);
}
2409 
2410 #ifndef CONFIG_USER_ONLY
static DisasJumpType trans_ixtlbx(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    /* IITLBA/IITLBP/IDTLBA/IDTLBP: insert an address or protection entry
       into the instruction or data TLB.  Privileged.  */
    unsigned sp;
    unsigned rr = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_data = insn & 0x1000;
    unsigned is_addr = insn & 0x40;
    TCGv_tl addr;
    TCGv_reg ofs, reg;

    if (is_data) {
        sp = extract32(insn, 14, 2);
    } else {
        /* Instruction-side form encodes the space as an sr3 field.  */
        sp = ~assemble_sr3(insn);
    }

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
    reg = load_gpr(ctx, rr);
    if (is_addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }

    /* Exit TB for ITLB change if mmu is enabled.  This *should* not be
       the case, since the OS TLB fill handler runs with mmu disabled.  */
    return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
                       ? DISAS_IAQ_N_STALE : DISAS_NEXT);
}
2444 
static DisasJumpType trans_pxtlbx(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    /* PITLB/PITLBE/PDTLB/PDTLBE: purge one TLB entry, or (local/"E"
       form) the entire TLB.  Privileged; supports base modification.  */
    unsigned m = extract32(insn, 5, 1);
    unsigned sp;
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_data = insn & 0x1000;
    unsigned is_local = insn & 0x40;
    TCGv_tl addr;
    TCGv_reg ofs;

    if (is_data) {
        sp = extract32(insn, 14, 2);
    } else {
        /* Instruction-side form encodes the space as an sr3 field.  */
        sp = ~assemble_sr3(insn);
    }

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
    if (m) {
        save_gpr(ctx, rb, ofs);
    }
    if (is_local) {
        gen_helper_ptlbe(cpu_env);
    } else {
        gen_helper_ptlb(cpu_env, addr);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
                       ? DISAS_IAQ_N_STALE : DISAS_NEXT);
}
2480 
static DisasJumpType trans_lpa(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* LPA: translate a virtual address to its physical address via the
       TLB, storing the result in GR[rt].  Privileged.  */
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv_tl vaddr;
    TCGv_reg ofs, paddr;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);

    paddr = tcg_temp_new();
    gen_helper_lpa(paddr, cpu_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (m) {
        save_gpr(ctx, rb, ofs);
    }
    save_gpr(ctx, rt, paddr);
    tcg_temp_free(paddr);

    return nullify_end(ctx, DISAS_NEXT);
}
2509 
static DisasJumpType trans_lci(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* LCI: load the coherence index for an address into GR[rt].
       Privileged.  */
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg ci;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    ci = tcg_const_reg(0);
    save_gpr(ctx, rt, ci);
    tcg_temp_free(ci);

    return DISAS_NEXT;
}
2528 #endif /* !CONFIG_USER_ONLY */
2529 
/* Decode table for the memory-management opcode group: opcode bits,
   decode mask, and translate handler for each insn.  Cache flushes are
   no-ops for us, apart from any base-register modification.  */
static const DisasInsn table_mem_mgmt[] = {
    { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
    { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
    { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
    { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
    { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
    { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
    { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
    { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
    { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
    { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
    { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
    { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
    { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
    { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
    { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
#ifndef CONFIG_USER_ONLY
    { 0x04000000u, 0xfc001fffu, trans_ixtlbx },       /* iitlbp */
    { 0x04000040u, 0xfc001fffu, trans_ixtlbx },       /* iitlba */
    { 0x04001000u, 0xfc001fffu, trans_ixtlbx },       /* idtlbp */
    { 0x04001040u, 0xfc001fffu, trans_ixtlbx },       /* idtlba */
    { 0x04000200u, 0xfc001fdfu, trans_pxtlbx },       /* pitlb */
    { 0x04000240u, 0xfc001fdfu, trans_pxtlbx },       /* pitlbe */
    { 0x04001200u, 0xfc001fdfu, trans_pxtlbx },       /* pdtlb */
    { 0x04001240u, 0xfc001fdfu, trans_pxtlbx },       /* pdtlbe */
    { 0x04001340u, 0xfc003fc0u, trans_lpa },
    { 0x04001300u, 0xfc003fe0u, trans_lci },
#endif
};
2559 
static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* ADD and SHLADD family: decode the variant flags (logical, carry,
       trap-on-overflow) from the extension field and defer to do_add.  */
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 8, 4);
    unsigned shift = extract32(insn, 6, 2);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    bool is_c = false;
    bool is_l = false;
    bool is_tc = false;
    bool is_tsv = false;
    DisasJumpType ret;

    switch (ext) {
    case 0x6: /* ADD, SHLADD */
        break;
    case 0xa: /* ADD,L, SHLADD,L */
        is_l = true;
        break;
    case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
        is_tsv = true;
        break;
    case 0x7: /* ADD,C */
        is_c = true;
        break;
    case 0xf: /* ADD,C,TSV */
        is_c = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    /* A non-zero condition field may nullify the following insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
    return nullify_end(ctx, ret);
}
2603 
static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* SUB family: decode the variant flags (borrow, trap-on-condition,
       trap-on-overflow) from the extension field and defer to do_sub.  */
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 6, 6);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    bool is_b = false;
    bool is_tc = false;
    bool is_tsv = false;
    DisasJumpType ret;

    switch (ext) {
    case 0x10: /* SUB */
        break;
    case 0x30: /* SUB,TSV */
        is_tsv = true;
        break;
    case 0x14: /* SUB,B */
        is_b = true;
        break;
    case 0x34: /* SUB,B,TSV */
        is_b = is_tsv = true;
        break;
    case 0x13: /* SUB,TC */
        is_tc = true;
        break;
    case 0x33: /* SUB,TSV,TC */
        is_tc = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    /* A non-zero condition field may nullify the following insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
    return nullify_end(ctx, ret);
}
2648 
static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* Logical ops (AND/OR/XOR/...): the actual TCG op comes from the
       decode table entry (di->f.ttt); do_log handles the cond field.  */
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    DisasJumpType ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
    return nullify_end(ctx, ret);
}
2667 
2668 /* OR r,0,t -> COPY (according to gas) */
2669 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
2670                                 const DisasInsn *di)
2671 {
2672     unsigned r1 = extract32(insn, 16, 5);
2673     unsigned rt = extract32(insn,  0, 5);
2674 
2675     if (r1 == 0) {
2676         TCGv_reg dest = dest_gpr(ctx, rt);
2677         tcg_gen_movi_reg(dest, 0);
2678         save_gpr(ctx, rt, dest);
2679     } else {
2680         save_gpr(ctx, rt, cpu_gr[r1]);
2681     }
2682     cond_free(&ctx->null_cond);
2683     return DISAS_NEXT;
2684 }
2685 
static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    /* CMPCLR: compare GR[r1] with GR[r2], clear GR[rt]; condition field
       handled by do_cmpclr.  */
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    DisasJumpType ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
    return nullify_end(ctx, ret);
}
2704 
static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* UXOR: unit xor of GR[r1] and GR[r2] via do_unit (never traps).  */
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    DisasJumpType ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
    return nullify_end(ctx, ret);
}
2723 
static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    /* UADDCM / UADDCM,TC: unit add of GR[r1] with the ones-complement
       of GR[r2]; the TC form may trap on condition (via do_unit).  */
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_tc = extract32(insn, 6, 1);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2, tmp;
    DisasJumpType ret;

    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    tmp = get_temp(ctx);
    tcg_gen_not_reg(tmp, tcg_r2);
    ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx, ret);
}
2745 
static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* DCOR / IDCOR: decimal correction of GR[r2] using the per-nibble
       carry bits in PSW[CB].  */
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tmp;
    DisasJumpType ret;

    nullify_over(ctx);

    /* Build a correction value of 6 (or 0) per BCD digit from the
       saved carry bits, then add or subtract it from GR[r2].  */
    tmp = get_temp(ctx);
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);

    return nullify_end(ctx, ret);
}
2770 
/* DS: divide step.  Computes (R1 << 1 | PSW[CB]{8}) plus R2 or -R2
   as selected by PSW[V], writing the sum to RT and updating PSW[CB]
   and PSW[V] for the next step of the software division sequence.  */
static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    /* PSW state is always written, so always nullify-protect.  */
    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);        /* ~R2 if V set, else R2 */
    tcg_gen_andi_reg(addc, addc, 1);         /* the "+1" when subtracting */
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB]: per-bit carries recovered as a ^ b ^ sum.  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv_reg sv = NULL;
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx, DISAS_NEXT);
}
2836 
2837 #ifndef CONFIG_USER_ONLY
2838 /* These are QEMU extensions and are nops in the real architecture:
2839  *
2840  * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2841  * or %r31,%r31,%r31 -- death loop; offline cpu
2842  *                      currently implemented as idle.
2843  */
static DisasJumpType trans_pause(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    TCGv_i32 tmp;

    /* No need to check for supervisor, as userland can only pause
       until the next timer interrupt.  */
    nullify_over(ctx);

    /* Advance the instruction queue so we resume after the pause.  */
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
    nullify_set(ctx, 0);

    /* Tell the qemu main loop to halt until this cpu has work.
       cpu_env points at HPPACPU.env; step back to the containing
       CPUState and forward to its 'halted' field.  */
    tmp = tcg_const_i32(1);
    tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
                                 offsetof(CPUState, halted));
    tcg_temp_free_i32(tmp);
    gen_excp_1(EXCP_HALTED);

    return nullify_end(ctx, DISAS_NORETURN);
}
2867 #endif
2868 
/* Decode table for the arith/log major opcode: { pattern, mask, handler }.
   Entries are matched in order, so the special-case patterns (nop, copy,
   pause) precede the generic ones they overlap.  */
static const DisasInsn table_arith_log[] = {
    { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
    { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
#ifndef CONFIG_USER_ONLY
    { 0x094a024au, 0xffffffffu, trans_pause }, /* or r10,r10,r10 */
    { 0x0bff025fu, 0xffffffffu, trans_pause }, /* or r31,r31,r31 */
#endif
    { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
    { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
    { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
    { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
    { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
    { 0x08000380u, 0xfc000fe0u, trans_uxor },
    { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
    { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
    { 0x08000440u, 0xfc000fe0u, trans_ds },
    { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
    { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
    { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
    { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
};
2890 
2891 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2892 {
2893     target_sreg im = low_sextract(insn, 0, 11);
2894     unsigned e1 = extract32(insn, 11, 1);
2895     unsigned cf = extract32(insn, 12, 4);
2896     unsigned rt = extract32(insn, 16, 5);
2897     unsigned r2 = extract32(insn, 21, 5);
2898     unsigned o1 = extract32(insn, 26, 1);
2899     TCGv_reg tcg_im, tcg_r2;
2900     DisasJumpType ret;
2901 
2902     if (cf) {
2903         nullify_over(ctx);
2904     }
2905 
2906     tcg_im = load_const(ctx, im);
2907     tcg_r2 = load_gpr(ctx, r2);
2908     ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2909 
2910     return nullify_end(ctx, ret);
2911 }
2912 
2913 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2914 {
2915     target_sreg im = low_sextract(insn, 0, 11);
2916     unsigned e1 = extract32(insn, 11, 1);
2917     unsigned cf = extract32(insn, 12, 4);
2918     unsigned rt = extract32(insn, 16, 5);
2919     unsigned r2 = extract32(insn, 21, 5);
2920     TCGv_reg tcg_im, tcg_r2;
2921     DisasJumpType ret;
2922 
2923     if (cf) {
2924         nullify_over(ctx);
2925     }
2926 
2927     tcg_im = load_const(ctx, im);
2928     tcg_r2 = load_gpr(ctx, r2);
2929     ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2930 
2931     return nullify_end(ctx, ret);
2932 }
2933 
2934 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2935 {
2936     target_sreg im = low_sextract(insn, 0, 11);
2937     unsigned cf = extract32(insn, 12, 4);
2938     unsigned rt = extract32(insn, 16, 5);
2939     unsigned r2 = extract32(insn, 21, 5);
2940     TCGv_reg tcg_im, tcg_r2;
2941     DisasJumpType ret;
2942 
2943     if (cf) {
2944         nullify_over(ctx);
2945     }
2946 
2947     tcg_im = load_const(ctx, im);
2948     tcg_r2 = load_gpr(ctx, r2);
2949     ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2950 
2951     return nullify_end(ctx, ret);
2952 }
2953 
2954 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2955                                     const DisasInsn *di)
2956 {
2957     unsigned rt = extract32(insn, 0, 5);
2958     unsigned m = extract32(insn, 5, 1);
2959     unsigned sz = extract32(insn, 6, 2);
2960     unsigned a = extract32(insn, 13, 1);
2961     unsigned sp = extract32(insn, 14, 2);
2962     int disp = low_sextract(insn, 16, 5);
2963     unsigned rb = extract32(insn, 21, 5);
2964     int modify = (m ? (a ? -1 : 1) : 0);
2965     TCGMemOp mop = MO_TE | sz;
2966 
2967     return do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
2968 }
2969 
2970 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2971                                     const DisasInsn *di)
2972 {
2973     unsigned rt = extract32(insn, 0, 5);
2974     unsigned m = extract32(insn, 5, 1);
2975     unsigned sz = extract32(insn, 6, 2);
2976     unsigned u = extract32(insn, 13, 1);
2977     unsigned sp = extract32(insn, 14, 2);
2978     unsigned rx = extract32(insn, 16, 5);
2979     unsigned rb = extract32(insn, 21, 5);
2980     TCGMemOp mop = MO_TE | sz;
2981 
2982     return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
2983 }
2984 
2985 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2986                                     const DisasInsn *di)
2987 {
2988     int disp = low_sextract(insn, 0, 5);
2989     unsigned m = extract32(insn, 5, 1);
2990     unsigned sz = extract32(insn, 6, 2);
2991     unsigned a = extract32(insn, 13, 1);
2992     unsigned sp = extract32(insn, 14, 2);
2993     unsigned rr = extract32(insn, 16, 5);
2994     unsigned rb = extract32(insn, 21, 5);
2995     int modify = (m ? (a ? -1 : 1) : 0);
2996     TCGMemOp mop = MO_TE | sz;
2997 
2998     return do_store(ctx, rr, rb, disp, sp, modify, mop);
2999 }
3000 
/* LDCW: load and clear word.  Implemented as an atomic exchange of
   zero with the memory word; the access requires 16-byte alignment.  */
static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned i = extract32(insn, 12, 1);   /* short-disp vs indexed form */
    unsigned au = extract32(insn, 13, 1);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;
    int modify, disp = 0, scale = 0;

    nullify_over(ctx);

    if (i) {
        /* Short displacement form: the RX field holds the displacement.  */
        modify = (m ? (au ? -1 : 1) : 0);
        disp = low_sextract(rx, 0, 5);
        rx = 0;
    } else {
        /* Indexed form: AU requests scaling of the index by the size.  */
        modify = m;
        if (au) {
            scale = mop & MO_SIZE;
        }
    }
    if (modify) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, rt);
    }

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    zero = tcg_const_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, DISAS_NEXT);
}
3047 
/* STBY: store bytes.  The A bit selects the "ends" (_e) versus
   "begin" (_b) helper; a parallel-safe helper variant is used when
   translating with CF_PARALLEL.  */
static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    target_sreg disp = low_sextract(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned a = extract32(insn, 13, 1);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv_reg ofs, val;
    TCGv_tl addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
             ctx->mmu_idx == MMU_PHYS_IDX);
    val = load_gpr(ctx, rt);
    if (a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }

    if (m) {
        /* Base modification: the updated base is aligned down to a word.  */
        tcg_gen_andi_reg(ofs, ofs, ~3);
        save_gpr(ctx, rb, ofs);
    }

    return nullify_end(ctx, DISAS_NEXT);
}
3086 
3087 #ifndef CONFIG_USER_ONLY
3088 static DisasJumpType trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
3089                                       const DisasInsn *di)
3090 {
3091     int hold_mmu_idx = ctx->mmu_idx;
3092     DisasJumpType ret;
3093 
3094     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3095 
3096     /* ??? needs fixing for hppa64 -- ldda does not follow the same
3097        format wrt the sub-opcode in bits 6:9.  */
3098     ctx->mmu_idx = MMU_PHYS_IDX;
3099     ret = trans_ld_idx_i(ctx, insn, di);
3100     ctx->mmu_idx = hold_mmu_idx;
3101     return ret;
3102 }
3103 
3104 static DisasJumpType trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
3105                                       const DisasInsn *di)
3106 {
3107     int hold_mmu_idx = ctx->mmu_idx;
3108     DisasJumpType ret;
3109 
3110     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3111 
3112     /* ??? needs fixing for hppa64 -- ldda does not follow the same
3113        format wrt the sub-opcode in bits 6:9.  */
3114     ctx->mmu_idx = MMU_PHYS_IDX;
3115     ret = trans_ld_idx_x(ctx, insn, di);
3116     ctx->mmu_idx = hold_mmu_idx;
3117     return ret;
3118 }
3119 
3120 static DisasJumpType trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
3121                                       const DisasInsn *di)
3122 {
3123     int hold_mmu_idx = ctx->mmu_idx;
3124     DisasJumpType ret;
3125 
3126     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3127 
3128     /* ??? needs fixing for hppa64 -- ldda does not follow the same
3129        format wrt the sub-opcode in bits 6:9.  */
3130     ctx->mmu_idx = MMU_PHYS_IDX;
3131     ret = trans_st_idx_i(ctx, insn, di);
3132     ctx->mmu_idx = hold_mmu_idx;
3133     return ret;
3134 }
3135 #endif
3136 
/* Decode table for the indexed memory opcode: { pattern, mask, handler }.  */
static const DisasInsn table_index_mem[] = {
    { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
    { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
    { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
    { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
    { 0x0c001300u, 0xfc0013c0, trans_stby },
#ifndef CONFIG_USER_ONLY
    { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
    { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
    { 0x0c001380u, 0xfc00d3c0, trans_stwa_idx_i }, /* STWA, im */
#endif
};
3149 
3150 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
3151 {
3152     unsigned rt = extract32(insn, 21, 5);
3153     target_sreg i = assemble_21(insn);
3154     TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3155 
3156     tcg_gen_movi_reg(tcg_rt, i);
3157     save_gpr(ctx, rt, tcg_rt);
3158     cond_free(&ctx->null_cond);
3159 
3160     return DISAS_NEXT;
3161 }
3162 
3163 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
3164 {
3165     unsigned rt = extract32(insn, 21, 5);
3166     target_sreg i = assemble_21(insn);
3167     TCGv_reg tcg_rt = load_gpr(ctx, rt);
3168     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3169 
3170     tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
3171     save_gpr(ctx, 1, tcg_r1);
3172     cond_free(&ctx->null_cond);
3173 
3174     return DISAS_NEXT;
3175 }
3176 
3177 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
3178 {
3179     unsigned rb = extract32(insn, 21, 5);
3180     unsigned rt = extract32(insn, 16, 5);
3181     target_sreg i = assemble_16(insn);
3182     TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3183 
3184     /* Special case rb == 0, for the LDI pseudo-op.
3185        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
3186     if (rb == 0) {
3187         tcg_gen_movi_reg(tcg_rt, i);
3188     } else {
3189         tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
3190     }
3191     save_gpr(ctx, rt, tcg_rt);
3192     cond_free(&ctx->null_cond);
3193 
3194     return DISAS_NEXT;
3195 }
3196 
3197 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
3198                                 bool is_mod, TCGMemOp mop)
3199 {
3200     unsigned rb = extract32(insn, 21, 5);
3201     unsigned rt = extract32(insn, 16, 5);
3202     unsigned sp = extract32(insn, 14, 2);
3203     target_sreg i = assemble_16(insn);
3204 
3205     return do_load(ctx, rt, rb, 0, 0, i, sp,
3206                    is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3207 }
3208 
3209 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
3210 {
3211     unsigned rb = extract32(insn, 21, 5);
3212     unsigned rt = extract32(insn, 16, 5);
3213     unsigned sp = extract32(insn, 14, 2);
3214     target_sreg i = assemble_16a(insn);
3215     unsigned ext2 = extract32(insn, 1, 2);
3216 
3217     switch (ext2) {
3218     case 0:
3219     case 1:
3220         /* FLDW without modification.  */
3221         return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3222     case 2:
3223         /* LDW with modification.  Note that the sign of I selects
3224            post-dec vs pre-inc.  */
3225         return do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3226     default:
3227         return gen_illegal(ctx);
3228     }
3229 }
3230 
3231 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
3232 {
3233     target_sreg i = assemble_16a(insn);
3234     unsigned t1 = extract32(insn, 1, 1);
3235     unsigned a = extract32(insn, 2, 1);
3236     unsigned sp = extract32(insn, 14, 2);
3237     unsigned t0 = extract32(insn, 16, 5);
3238     unsigned rb = extract32(insn, 21, 5);
3239 
3240     /* FLDW with modification.  */
3241     return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3242 }
3243 
3244 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
3245                                  bool is_mod, TCGMemOp mop)
3246 {
3247     unsigned rb = extract32(insn, 21, 5);
3248     unsigned rt = extract32(insn, 16, 5);
3249     unsigned sp = extract32(insn, 14, 2);
3250     target_sreg i = assemble_16(insn);
3251 
3252     return do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3253 }
3254 
3255 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
3256 {
3257     unsigned rb = extract32(insn, 21, 5);
3258     unsigned rt = extract32(insn, 16, 5);
3259     unsigned sp = extract32(insn, 14, 2);
3260     target_sreg i = assemble_16a(insn);
3261     unsigned ext2 = extract32(insn, 1, 2);
3262 
3263     switch (ext2) {
3264     case 0:
3265     case 1:
3266         /* FSTW without modification.  */
3267         return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3268     case 2:
3269         /* STW with modification.  */
3270         return do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3271     default:
3272         return gen_illegal(ctx);
3273     }
3274 }
3275 
3276 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3277 {
3278     target_sreg i = assemble_16a(insn);
3279     unsigned t1 = extract32(insn, 1, 1);
3280     unsigned a = extract32(insn, 2, 1);
3281     unsigned sp = extract32(insn, 14, 2);
3282     unsigned t0 = extract32(insn, 16, 5);
3283     unsigned rb = extract32(insn, 21, 5);
3284 
3285     /* FSTW with modification.  */
3286     return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3287 }
3288 
3289 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
3290 {
3291     unsigned t0 = extract32(insn, 0, 5);
3292     unsigned m = extract32(insn, 5, 1);
3293     unsigned t1 = extract32(insn, 6, 1);
3294     unsigned ext3 = extract32(insn, 7, 3);
3295     /* unsigned cc = extract32(insn, 10, 2); */
3296     unsigned i = extract32(insn, 12, 1);
3297     unsigned ua = extract32(insn, 13, 1);
3298     unsigned sp = extract32(insn, 14, 2);
3299     unsigned rx = extract32(insn, 16, 5);
3300     unsigned rb = extract32(insn, 21, 5);
3301     unsigned rt = t1 * 32 + t0;
3302     int modify = (m ? (ua ? -1 : 1) : 0);
3303     int disp, scale;
3304 
3305     if (i == 0) {
3306         scale = (ua ? 2 : 0);
3307         disp = 0;
3308         modify = m;
3309     } else {
3310         disp = low_sextract(rx, 0, 5);
3311         scale = 0;
3312         rx = 0;
3313         modify = (m ? (ua ? -1 : 1) : 0);
3314     }
3315 
3316     switch (ext3) {
3317     case 0: /* FLDW */
3318         return do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3319     case 4: /* FSTW */
3320         return do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3321     }
3322     return gen_illegal(ctx);
3323 }
3324 
3325 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
3326 {
3327     unsigned rt = extract32(insn, 0, 5);
3328     unsigned m = extract32(insn, 5, 1);
3329     unsigned ext4 = extract32(insn, 6, 4);
3330     /* unsigned cc = extract32(insn, 10, 2); */
3331     unsigned i = extract32(insn, 12, 1);
3332     unsigned ua = extract32(insn, 13, 1);
3333     unsigned sp = extract32(insn, 14, 2);
3334     unsigned rx = extract32(insn, 16, 5);
3335     unsigned rb = extract32(insn, 21, 5);
3336     int modify = (m ? (ua ? -1 : 1) : 0);
3337     int disp, scale;
3338 
3339     if (i == 0) {
3340         scale = (ua ? 3 : 0);
3341         disp = 0;
3342         modify = m;
3343     } else {
3344         disp = low_sextract(rx, 0, 5);
3345         scale = 0;
3346         rx = 0;
3347         modify = (m ? (ua ? -1 : 1) : 0);
3348     }
3349 
3350     switch (ext4) {
3351     case 0: /* FLDD */
3352         return do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3353     case 8: /* FSTD */
3354         return do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3355     default:
3356         return gen_illegal(ctx);
3357     }
3358 }
3359 
3360 static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
3361                                 bool is_true, bool is_imm, bool is_dw)
3362 {
3363     target_sreg disp = assemble_12(insn) * 4;
3364     unsigned n = extract32(insn, 1, 1);
3365     unsigned c = extract32(insn, 13, 3);
3366     unsigned r = extract32(insn, 21, 5);
3367     unsigned cf = c * 2 + !is_true;
3368     TCGv_reg dest, in1, in2, sv;
3369     DisasCond cond;
3370 
3371     nullify_over(ctx);
3372 
3373     if (is_imm) {
3374         in1 = load_const(ctx, low_sextract(insn, 16, 5));
3375     } else {
3376         in1 = load_gpr(ctx, extract32(insn, 16, 5));
3377     }
3378     in2 = load_gpr(ctx, r);
3379     dest = get_temp(ctx);
3380 
3381     tcg_gen_sub_reg(dest, in1, in2);
3382 
3383     sv = NULL;
3384     if (c == 6) {
3385         sv = do_sub_sv(ctx, dest, in1, in2);
3386     }
3387 
3388     cond = do_sub_cond(cf, dest, in1, in2, sv);
3389     return do_cbranch(ctx, disp, n, &cond);
3390 }
3391 
/* Add and branch: R = IN1 + R, then branch on condition C applied to
   the sum.  Extra state (carry-out or signed overflow) is computed
   only for the conditions that consume it.  */
static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
                                bool is_true, bool is_imm)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);          /* nullify-next bit */
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;              /* fold branch sense in */
    TCGv_reg dest, in1, in2, sv, cb_msb;
    DisasCond cond;

    nullify_over(ctx);

    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = dest_gpr(ctx, r);
    sv = NULL;
    cb_msb = NULL;

    switch (c) {
    default:
        tcg_gen_add_reg(dest, in1, in2);
        break;
    case 4: case 5:
        /* These conditions need the carry out of the addition.  */
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        break;
    case 6:
        /* This condition needs the signed-overflow value.  */
        tcg_gen_add_reg(dest, in1, in2);
        sv = do_add_sv(ctx, dest, in1, in2);
        break;
    }

    cond = do_cond(cf, dest, cb_msb, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3433 
3434 static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
3435 {
3436     target_sreg disp = assemble_12(insn) * 4;
3437     unsigned n = extract32(insn, 1, 1);
3438     unsigned c = extract32(insn, 15, 1);
3439     unsigned r = extract32(insn, 16, 5);
3440     unsigned p = extract32(insn, 21, 5);
3441     unsigned i = extract32(insn, 26, 1);
3442     TCGv_reg tmp, tcg_r;
3443     DisasCond cond;
3444 
3445     nullify_over(ctx);
3446 
3447     tmp = tcg_temp_new();
3448     tcg_r = load_gpr(ctx, r);
3449     if (i) {
3450         tcg_gen_shli_reg(tmp, tcg_r, p);
3451     } else {
3452         tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3453     }
3454 
3455     cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
3456     tcg_temp_free(tmp);
3457     return do_cbranch(ctx, disp, n, &cond);
3458 }
3459 
3460 static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
3461 {
3462     target_sreg disp = assemble_12(insn) * 4;
3463     unsigned n = extract32(insn, 1, 1);
3464     unsigned c = extract32(insn, 13, 3);
3465     unsigned t = extract32(insn, 16, 5);
3466     unsigned r = extract32(insn, 21, 5);
3467     TCGv_reg dest;
3468     DisasCond cond;
3469 
3470     nullify_over(ctx);
3471 
3472     dest = dest_gpr(ctx, r);
3473     if (is_imm) {
3474         tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
3475     } else if (t == 0) {
3476         tcg_gen_movi_reg(dest, 0);
3477     } else {
3478         tcg_gen_mov_reg(dest, cpu_gr[t]);
3479     }
3480 
3481     cond = do_sed_cond(c, dest);
3482     return do_cbranch(ctx, disp, n, &cond);
3483 }
3484 
/* SHRPW, variable shift: shift the 64-bit pair R1:R2 right by the
   amount in SAR and keep the low 32 bits.  Three codegen strategies
   are used depending on the register fields.  */
static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv_reg dest;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    if (r1 == 0) {
        /* High half is zero: a plain shift of the zero-extended R2.  */
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (r1 == r2) {
        /* Both halves identical: equivalent to a 32-bit rotate right.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        /* General case: build the 64-bit pair and shift it.  */
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3529 
/* SHRPW, fixed shift: shift the 64-bit pair R1:R2 right by the
   immediate amount (31 - cpos) and keep the low 32 bits.  */
static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned sa = 31 - cpos;
    TCGv_reg dest, t2;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    t2 = load_gpr(ctx, r2);
    if (r1 == r2) {
        /* Both halves identical: equivalent to a 32-bit rotate right.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else if (r1 == 0) {
        /* High half is zero: a simple field extract of R2.  */
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else {
        /* General case: R2's high bits, with R1's low bits deposited
           on top.  */
        TCGv_reg t0 = tcg_temp_new();
        tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
        tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
        tcg_temp_free(t0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3570 
/* EXTRW, variable position: extract a LEN-bit field of GR[rr] at the
   bit position held in SAR, zero- or sign-extending per IS_SE.  */
static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned is_se = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rr = extract32(insn, 21, 5);
    unsigned len = 32 - clen;                /* field is encoded as 32-len */
    TCGv_reg dest, src, tmp;

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    src = load_gpr(ctx, rr);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (is_se) {
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3609 
3610 static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
3611                                      const DisasInsn *di)
3612 {
3613     unsigned clen = extract32(insn, 0, 5);
3614     unsigned pos = extract32(insn, 5, 5);
3615     unsigned is_se = extract32(insn, 10, 1);
3616     unsigned c = extract32(insn, 13, 3);
3617     unsigned rt = extract32(insn, 16, 5);
3618     unsigned rr = extract32(insn, 21, 5);
3619     unsigned len = 32 - clen;
3620     unsigned cpos = 31 - pos;
3621     TCGv_reg dest, src;
3622 
3623     if (c) {
3624         nullify_over(ctx);
3625     }
3626 
3627     dest = dest_gpr(ctx, rt);
3628     src = load_gpr(ctx, rr);
3629     if (is_se) {
3630         tcg_gen_sextract_reg(dest, src, cpos, len);
3631     } else {
3632         tcg_gen_extract_reg(dest, src, cpos, len);
3633     }
3634     save_gpr(ctx, rt, dest);
3635 
3636     /* Install the new nullification.  */
3637     cond_free(&ctx->null_cond);
3638     if (c) {
3639         ctx->null_cond = do_sed_cond(c, dest);
3640     }
3641     return nullify_end(ctx, DISAS_NEXT);
3642 }
3643 
/* Decode table for the shift/extract opcode: { pattern, mask, handler }.  */
static const DisasInsn table_sh_ex[] = {
    { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
    { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
    { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
    { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
};
3650 
/* DEPWI, fixed position: deposit a 5-bit signed immediate into a
   field of GR[rt].  Folded entirely into constant masks at translate
   time.  */
static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned nz = extract32(insn, 10, 1);    /* 1: merge into old value */
    unsigned c = extract32(insn, 13, 3);
    target_sreg val = low_sextract(insn, 16, 5);
    unsigned rt = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the deposit so it cannot run off the 32-bit word.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    /* mask0: the field value over a zero background;
       mask1: the field value over an all-ones background.  */
    mask0 = deposit64(0, cpos, len, val);
    mask1 = deposit64(-1, cpos, len, val);

    if (nz) {
        /* Merge: clear the field in the old value, then OR it in.  */
        TCGv_reg src = load_gpr(ctx, rt);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        /* Zero background: the whole result is a constant.  */
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3694 
3695 static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
3696                                     const DisasInsn *di)
3697 {
3698     unsigned clen = extract32(insn, 0, 5);
3699     unsigned cpos = extract32(insn, 5, 5);
3700     unsigned nz = extract32(insn, 10, 1);
3701     unsigned c = extract32(insn, 13, 3);
3702     unsigned rr = extract32(insn, 16, 5);
3703     unsigned rt = extract32(insn, 21, 5);
3704     unsigned rs = nz ? rt : 0;
3705     unsigned len = 32 - clen;
3706     TCGv_reg dest, val;
3707 
3708     if (c) {
3709         nullify_over(ctx);
3710     }
3711     if (cpos + len > 32) {
3712         len = 32 - cpos;
3713     }
3714 
3715     dest = dest_gpr(ctx, rt);
3716     val = load_gpr(ctx, rr);
3717     if (rs == 0) {
3718         tcg_gen_deposit_z_reg(dest, val, cpos, len);
3719     } else {
3720         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
3721     }
3722     save_gpr(ctx, rt, dest);
3723 
3724     /* Install the new nullification.  */
3725     cond_free(&ctx->null_cond);
3726     if (c) {
3727         ctx->null_cond = do_sed_cond(c, dest);
3728     }
3729     return nullify_end(ctx, DISAS_NEXT);
3730 }
3731 
static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    /* Variable deposit: insert the low 'len' bits of the source (a
       register, or a 5-bit signed immediate when 'i' is set) into rt
       at a runtime bit position taken from the SAR register.  */
    unsigned clen = extract32(insn, 0, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned i = extract32(insn, 12, 1);       /* immediate vs register form */
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 21, 5);
    unsigned rs = nz ? rt : 0;                 /* merge with rt, or zeros */
    unsigned len = 32 - clen;
    TCGv_reg val, mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    if (c) {
        nullify_over(ctx);
    }

    if (i) {
        val = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        val = load_gpr(ctx, extract32(insn, 16, 5));
    }
    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    /* Mask covering the low 'len' bits to be deposited.  */
    mask = tcg_const_reg(msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        /* Merge form: clear the field in rs, then OR in the new bits.  */
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        /* Zero form: just shift the masked bits into place.  */
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3783 
/* Deposit instruction group (major opcode 0x35).  */
static const DisasInsn table_depw[] = {
    { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
    { 0xd4000800u, 0xfc001800u, trans_depw_imm },
    { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
};
3789 
static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
{
    /* BE / BE,L: branch to disp(sr,rb).  With is_l the return point is
       linked into r31 and the old front space is copied to sr0.  */
    unsigned n = extract32(insn, 1, 1);
    unsigned b = extract32(insn, 21, 5);
    target_sreg disp = assemble_17(insn);
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (b == 0) {
        return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
    }
#else
    int sp = assemble_sr3(insn);
    nullify_over(ctx);
#endif

    /* Compute the offset half of the target; priv may adjust the
       low bits of the address.  */
    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, sp);
    if (is_l) {
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (n && use_nullify_skip(ctx)) {
        /* Delay slot is nullified: point both queue entries straight
           at the branch target in the new space.  */
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        /* Delay slot executes: advance the front to the old back, and
           aim the back of the queue at the branch target.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, n);
    }
    tcg_temp_free_i64(new_spc);
    tcg_gen_lookup_and_goto_ptr();
    return nullify_end(ctx, DISAS_NORETURN);
#endif
}
3847 
3848 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
3849                               const DisasInsn *di)
3850 {
3851     unsigned n = extract32(insn, 1, 1);
3852     unsigned link = extract32(insn, 21, 5);
3853     target_sreg disp = assemble_17(insn);
3854 
3855     return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3856 }
3857 
static DisasJumpType trans_b_gate(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    /* B,GATE: direct branch that may raise the privilege level when it
       lands on a gateway page.  */
    unsigned n = extract32(insn, 1, 1);
    unsigned link = extract32(insn, 21, 5);
    target_sreg disp = assemble_17(insn);
    target_ureg dest = iaoq_dest(ctx, disp);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            return gen_excp(ctx, EXCP_ITLB_MISS);
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            /* Promote: encode the new privilege in the low 2 bits.
               NOTE(review): deposit32 truncates dest to 32 bits; assumes
               target_ureg is 32-bit here -- confirm for 64-bit targets.  */
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    return do_dbranch(ctx, dest, link, n);
}
3904 
3905 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
3906                                    const DisasInsn *di)
3907 {
3908     unsigned n = extract32(insn, 1, 1);
3909     target_sreg disp = assemble_22(insn);
3910 
3911     return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3912 }
3913 
3914 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
3915                                const DisasInsn *di)
3916 {
3917     unsigned n = extract32(insn, 1, 1);
3918     unsigned rx = extract32(insn, 16, 5);
3919     unsigned link = extract32(insn, 21, 5);
3920     TCGv_reg tmp = get_temp(ctx);
3921 
3922     tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3923     tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3924     /* The computation here never changes privilege level.  */
3925     return do_ibranch(ctx, tmp, link, n);
3926 }
3927 
3928 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3929                               const DisasInsn *di)
3930 {
3931     unsigned n = extract32(insn, 1, 1);
3932     unsigned rx = extract32(insn, 16, 5);
3933     unsigned rb = extract32(insn, 21, 5);
3934     TCGv_reg dest;
3935 
3936     if (rx == 0) {
3937         dest = load_gpr(ctx, rb);
3938     } else {
3939         dest = get_temp(ctx);
3940         tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3941         tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3942     }
3943     dest = do_ibranch_priv(ctx, dest);
3944     return do_ibranch(ctx, dest, 0, n);
3945 }
3946 
static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    /* BVE: branch vectored external; the target offset comes from rb and
       the target space is derived from it.  Optional link into r2.  */
    unsigned n = extract32(insn, 1, 1);
    unsigned rb = extract32(insn, 21, 5);
    unsigned link = extract32(insn, 13, 1) ? 2 : 0;
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    /* No space registers in user mode: a plain indirect branch.  */
    dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
    return do_ibranch(ctx, dest, link, n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));

    /* Advance the front of the queue to the old back...  */
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    /* ...and aim the back of the queue at the branch target.  */
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (link) {
        copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, n);
    tcg_gen_lookup_and_goto_ptr();
    return nullify_end(ctx, DISAS_NORETURN);
#endif
}
3976 
/* Branch instruction group (major opcode 0x3A).  */
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
    { 0xe8002000u, 0xfc00e000u, trans_b_gate },
};
3985 
3986 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3987                                       const DisasInsn *di)
3988 {
3989     unsigned rt = extract32(insn, 0, 5);
3990     unsigned ra = extract32(insn, 21, 5);
3991     return do_fop_wew(ctx, rt, ra, di->f.wew);
3992 }
3993 
3994 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3995                                       const DisasInsn *di)
3996 {
3997     unsigned rt = assemble_rt64(insn);
3998     unsigned ra = assemble_ra64(insn);
3999     return do_fop_wew(ctx, rt, ra, di->f.wew);
4000 }
4001 
4002 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
4003                                    const DisasInsn *di)
4004 {
4005     unsigned rt = extract32(insn, 0, 5);
4006     unsigned ra = extract32(insn, 21, 5);
4007     return do_fop_ded(ctx, rt, ra, di->f.ded);
4008 }
4009 
4010 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
4011                                       const DisasInsn *di)
4012 {
4013     unsigned rt = extract32(insn, 0, 5);
4014     unsigned ra = extract32(insn, 21, 5);
4015     return do_fop_wed(ctx, rt, ra, di->f.wed);
4016 }
4017 
4018 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
4019                                       const DisasInsn *di)
4020 {
4021     unsigned rt = assemble_rt64(insn);
4022     unsigned ra = extract32(insn, 21, 5);
4023     return do_fop_wed(ctx, rt, ra, di->f.wed);
4024 }
4025 
4026 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
4027                                       const DisasInsn *di)
4028 {
4029     unsigned rt = extract32(insn, 0, 5);
4030     unsigned ra = extract32(insn, 21, 5);
4031     return do_fop_dew(ctx, rt, ra, di->f.dew);
4032 }
4033 
4034 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
4035                                       const DisasInsn *di)
4036 {
4037     unsigned rt = extract32(insn, 0, 5);
4038     unsigned ra = assemble_ra64(insn);
4039     return do_fop_dew(ctx, rt, ra, di->f.dew);
4040 }
4041 
4042 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
4043                                        const DisasInsn *di)
4044 {
4045     unsigned rt = extract32(insn, 0, 5);
4046     unsigned rb = extract32(insn, 16, 5);
4047     unsigned ra = extract32(insn, 21, 5);
4048     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4049 }
4050 
4051 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
4052                                        const DisasInsn *di)
4053 {
4054     unsigned rt = assemble_rt64(insn);
4055     unsigned rb = assemble_rb64(insn);
4056     unsigned ra = assemble_ra64(insn);
4057     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4058 }
4059 
4060 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
4061                                     const DisasInsn *di)
4062 {
4063     unsigned rt = extract32(insn, 0, 5);
4064     unsigned rb = extract32(insn, 16, 5);
4065     unsigned ra = extract32(insn, 21, 5);
4066     return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
4067 }
4068 
/* FCPY,sgl: raw 32-bit bit copy; no rounding or exception checks.  */
static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
4073 
/* FCPY,dbl: raw 64-bit bit copy; no rounding or exception checks.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}
4078 
/* FABS,sgl: clear the IEEE sign bit (bit 31).  */
static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}
4083 
/* FABS,dbl: clear the IEEE sign bit (bit 63).  */
static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}
4088 
/* FNEG,sgl: toggle the IEEE sign bit (bit 31).  */
static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}
4093 
/* FNEG,dbl: toggle the IEEE sign bit (bit 63).  */
static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}
4098 
/* FNEGABS,sgl: force the sign bit on, i.e. -|x|.  */
static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}
4103 
/* FNEGABS,dbl: force the sign bit on, i.e. -|x|.  */
static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}
4108 
4109 static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
4110                                unsigned y, unsigned c)
4111 {
4112     TCGv_i32 ta, tb, tc, ty;
4113 
4114     nullify_over(ctx);
4115 
4116     ta = load_frw0_i32(ra);
4117     tb = load_frw0_i32(rb);
4118     ty = tcg_const_i32(y);
4119     tc = tcg_const_i32(c);
4120 
4121     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
4122 
4123     tcg_temp_free_i32(ta);
4124     tcg_temp_free_i32(tb);
4125     tcg_temp_free_i32(ty);
4126     tcg_temp_free_i32(tc);
4127 
4128     return nullify_end(ctx, DISAS_NEXT);
4129 }
4130 
4131 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
4132                                      const DisasInsn *di)
4133 {
4134     unsigned c = extract32(insn, 0, 5);
4135     unsigned y = extract32(insn, 13, 3);
4136     unsigned rb = extract32(insn, 16, 5);
4137     unsigned ra = extract32(insn, 21, 5);
4138     return do_fcmp_s(ctx, ra, rb, y, c);
4139 }
4140 
4141 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
4142                                      const DisasInsn *di)
4143 {
4144     unsigned c = extract32(insn, 0, 5);
4145     unsigned y = extract32(insn, 13, 3);
4146     unsigned rb = assemble_rb64(insn);
4147     unsigned ra = assemble_ra64(insn);
4148     return do_fcmp_s(ctx, ra, rb, y, c);
4149 }
4150 
4151 static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
4152                                   const DisasInsn *di)
4153 {
4154     unsigned c = extract32(insn, 0, 5);
4155     unsigned y = extract32(insn, 13, 3);
4156     unsigned rb = extract32(insn, 16, 5);
4157     unsigned ra = extract32(insn, 21, 5);
4158     TCGv_i64 ta, tb;
4159     TCGv_i32 tc, ty;
4160 
4161     nullify_over(ctx);
4162 
4163     ta = load_frd0(ra);
4164     tb = load_frd0(rb);
4165     ty = tcg_const_i32(y);
4166     tc = tcg_const_i32(c);
4167 
4168     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
4169 
4170     tcg_temp_free_i64(ta);
4171     tcg_temp_free_i64(tb);
4172     tcg_temp_free_i32(ty);
4173     tcg_temp_free_i32(tc);
4174 
4175     return nullify_end(ctx, DISAS_NEXT);
4176 }
4177 
static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    /* FTEST (single-bit form): nullify the next insn when the selected
       bit of the fr0 status shadow is set.  */
    unsigned y = extract32(insn, 13, 3);
    unsigned cbit = (y ^ 1) - 1;   /* derive the shadow bit index from y */
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    /* Read the software copy of the FP status register.  */
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
    tcg_gen_extract_reg(t, t, 21 - cbit, 1);
    ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    tcg_temp_free(t);

    return nullify_end(ctx, DISAS_NEXT);
}
4195 
static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    /* FTEST (queue form): nullify based on a set of bits from the fr0
       status shadow selected by condition 'c'.  */
    unsigned c = extract32(insn, 0, 5);
    int mask;
    bool inv = false;
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    /* Each case picks a mask of status-shadow bits; 'inv' flips the
       sense (reject vs accept).  NOTE(review): mask values assumed to
       match the PA-RISC FTEST conditions -- verify against the manual.  */
    switch (c) {
    case 0: /* simple */
        tcg_gen_andi_reg(t, t, 0x4000000);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        goto done;
    case 2: /* rej */
        inv = true;
        /* fallthru */
    case 1: /* acc */
        mask = 0x43ff800;
        break;
    case 6: /* rej8 */
        inv = true;
        /* fallthru */
    case 5: /* acc8 */
        mask = 0x43f8000;
        break;
    case 9: /* acc6 */
        mask = 0x43e0000;
        break;
    case 13: /* acc4 */
        mask = 0x4380000;
        break;
    case 17: /* acc2 */
        mask = 0x4200000;
        break;
    default:
        return gen_illegal(ctx);
    }
    if (inv) {
        /* Reject: nullify when every selected bit is set.  */
        TCGv_reg c = load_const(ctx, mask);
        tcg_gen_or_reg(t, t, c);
        ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
    } else {
        /* Accept: nullify when none of the selected bits are set.  */
        tcg_gen_andi_reg(t, t, mask);
        ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
    }
 done:
    return nullify_end(ctx, DISAS_NEXT);
}
4249 
4250 static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
4251                                  const DisasInsn *di)
4252 {
4253     unsigned rt = extract32(insn, 0, 5);
4254     unsigned rb = assemble_rb64(insn);
4255     unsigned ra = assemble_ra64(insn);
4256     TCGv_i64 a, b;
4257 
4258     nullify_over(ctx);
4259 
4260     a = load_frw0_i64(ra);
4261     b = load_frw0_i64(rb);
4262     tcg_gen_mul_i64(a, a, b);
4263     save_frd(rt, a);
4264     tcg_temp_free_i64(a);
4265     tcg_temp_free_i64(b);
4266 
4267     return nullify_end(ctx, DISAS_NEXT);
4268 }
4269 
/* Table-entry shorthand: pair a decode wrapper with the union member
   that designated initializers fill with the generator function.  */
#define FOP_DED  trans_fop_ded, .f.ded
#define FOP_DEDD trans_fop_dedd, .f.dedd

/* Single-word forms bound to the 0C major-opcode wrappers.  */
#define FOP_WEW  trans_fop_wew_0c, .f.wew
#define FOP_DEW  trans_fop_dew_0c, .f.dew
#define FOP_WED  trans_fop_wed_0c, .f.wed
#define FOP_WEWW trans_fop_weww_0c, .f.weww
4277 
/* Floating-point instruction group, major opcode 0x0C.  */
static const DisasInsn table_float_0c[] = {
    /* floating point class zero */
    { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
    { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
    { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
    { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
    { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },

    { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
    { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
    { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
    { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },

    { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    /* floating point class one */
    /* float/float */
    { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
    { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
    { 0x30002420, 0xffffffe0, trans_ftest_q },
    { 0x30000420, 0xffff1fff, trans_ftest_t },

    /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
       This is machine/revision == 0, which is reserved for simulator.  */
    { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
};
4350 
/* Rebind the single-word shorthands to the 0E major-opcode wrappers
   for the table that follows.  */
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#define FOP_WEW  trans_fop_wew_0e, .f.wew
#define FOP_DEW  trans_fop_dew_0e, .f.dew
#define FOP_WED  trans_fop_wed_0e, .f.wed
#define FOP_WEWW trans_fop_weww_0e, .f.weww
4359 
/* Floating-point instruction group, major opcode 0x0E.  */
static const DisasInsn table_float_0e[] = {
    /* floating point class zero */
    { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
    { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
    { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
    { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
    { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },

    { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
    { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
    { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
    { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },

    { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    { 0x38004700, 0xfc00ef60, trans_xmpyu },

    /* floating point class one */
    /* float/float */
    { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
    { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
};
4428 
/* Done with the table-entry shorthands.  */
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#undef FOP_DED
#undef FOP_DEDD
4435 
/* Convert the fmpyadd single-precision register encodings to standard:
   bit 4 of the packed 5-bit field selects the upper bank, the low four
   bits index within it, and the result is offset by 16.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    unsigned bank = r & 16;
    unsigned index = r & 15;

    return 16 + index + bank * 2;
}
4441 
static DisasJumpType trans_fmpyadd(DisasContext *ctx,
                                   uint32_t insn, bool is_sub)
{
    /* FMPYADD / FMPYSUB: two independent FP operations in one insn --
       tm = rm1 * rm2 in parallel with ta = ta +/- ra.  */
    unsigned tm = extract32(insn, 0, 5);
    unsigned f = extract32(insn, 5, 1);    /* 0 = single, 1 = double */
    unsigned ra = extract32(insn, 6, 5);
    unsigned ta = extract32(insn, 11, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned rm1 = extract32(insn, 21, 5);

    nullify_over(ctx);

    /* Independent multiply & add/sub, with undefined behaviour
       if outputs overlap inputs.  */
    if (f == 0) {
        /* Single precision packs bank selection into the 5-bit fields;
           remap to flat register numbers before use.  */
        tm = fmpyadd_s_reg(tm);
        ra = fmpyadd_s_reg(ra);
        ta = fmpyadd_s_reg(ta);
        rm2 = fmpyadd_s_reg(rm2);
        rm1 = fmpyadd_s_reg(rm1);
        do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
        do_fop_weww(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
    } else {
        do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
        do_fop_dedd(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
    }

    return nullify_end(ctx, DISAS_NEXT);
}
4473 
4474 static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
4475                                       const DisasInsn *di)
4476 {
4477     unsigned rt = assemble_rt64(insn);
4478     unsigned neg = extract32(insn, 5, 1);
4479     unsigned rm1 = assemble_ra64(insn);
4480     unsigned rm2 = assemble_rb64(insn);
4481     unsigned ra3 = assemble_rc64(insn);
4482     TCGv_i32 a, b, c;
4483 
4484     nullify_over(ctx);
4485     a = load_frw0_i32(rm1);
4486     b = load_frw0_i32(rm2);
4487     c = load_frw0_i32(ra3);
4488 
4489     if (neg) {
4490         gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
4491     } else {
4492         gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
4493     }
4494 
4495     tcg_temp_free_i32(b);
4496     tcg_temp_free_i32(c);
4497     save_frw_i32(rt, a);
4498     tcg_temp_free_i32(a);
4499     return nullify_end(ctx, DISAS_NEXT);
4500 }
4501 
4502 static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
4503                                       const DisasInsn *di)
4504 {
4505     unsigned rt = extract32(insn, 0, 5);
4506     unsigned neg = extract32(insn, 5, 1);
4507     unsigned rm1 = extract32(insn, 21, 5);
4508     unsigned rm2 = extract32(insn, 16, 5);
4509     unsigned ra3 = assemble_rc64(insn);
4510     TCGv_i64 a, b, c;
4511 
4512     nullify_over(ctx);
4513     a = load_frd0(rm1);
4514     b = load_frd0(rm2);
4515     c = load_frd0(ra3);
4516 
4517     if (neg) {
4518         gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
4519     } else {
4520         gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
4521     }
4522 
4523     tcg_temp_free_i64(b);
4524     tcg_temp_free_i64(c);
4525     save_frd(rt, a);
4526     tcg_temp_free_i64(a);
4527     return nullify_end(ctx, DISAS_NEXT);
4528 }
4529 
/* Decode table for the major opcode 0x2E (FP fused multiply-add) group.
   Each entry is { match-bits, mask, handler }: an insn matches when
   (insn & mask) == match-bits (see translate_table_int).  */
static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};
4534 
4535 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
4536                                          const DisasInsn table[], size_t n)
4537 {
4538     size_t i;
4539     for (i = 0; i < n; ++i) {
4540         if ((insn & table[i].mask) == table[i].insn) {
4541             return table[i].trans(ctx, insn, &table[i]);
4542         }
4543     }
4544     qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
4545                   insn, ctx->base.pc_next);
4546     return gen_illegal(ctx);
4547 }
4548 
/* Convenience wrapper: infer the table length with ARRAY_SIZE, so the
   decode tables above need only be passed by name.  */
#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
4551 
/* Top-level decode of one instruction: switch on the 6-bit major opcode
   (bits 26-31) and either handle it directly or defer to a secondary
   decode table.  Anything that falls out of the switch is illegal.  */
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x00: /* system op */
        return translate_table(ctx, insn, table_system);
    case 0x01:
        return translate_table(ctx, insn, table_mem_mgmt);
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x06:
        return trans_fmpyadd(ctx, insn, false);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0C:
        return translate_table(ctx, insn, table_float_0c);
    case 0x0D:
        return trans_ldo(ctx, insn);
    case 0x0E:
        return translate_table(ctx, insn, table_float_0e);

    /* Loads and stores with displacement; the bool argument selects
       the modify (base-update) form.  */
    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    /* Compare-and-branch family; flags are (is_true, is_imm, is_dw).  */
    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x26:
        return trans_fmpyadd(ctx, insn, true);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2E:
        return translate_table(ctx, insn, table_fp_fused);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
        break;
    case 0x3F:
#ifndef CONFIG_USER_ONLY
        /* Unassigned, but use as system-halt.  */
        if (insn == 0xfffdead0) {
            return gen_hlt(ctx, 0); /* halt system */
        }
        if (insn == 0xfffdead1) {
            return gen_hlt(ctx, 1); /* reset system */
        }
#endif
        break;
    default:
        break;
    }
    /* Opcodes not handled above raise an illegal-instruction trap.  */
    return gen_illegal(ctx);
}
4683 
/* TranslatorOps hook: set up the per-TB DisasContext from the TB flags
   and PC, and return the maximum number of insns to translate (bounded
   so a TB never crosses a page).  */
static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
                                      CPUState *cs, int max_insns)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    /* User-only runs entirely at the lowest privilege; the privilege
       level is folded into the low bits of the IAOQ values.  */
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    /* Low 32 bits of cs_base hold the signed IAOQ_B - IAOQ_F offset;
       zero means the back of the queue is unknown (-1 sentinel).  */
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    bound = MIN(max_insns, bound);

    /* Start with no live TCG temporaries tracked.  */
    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));

    return bound;
}
4724 
4725 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4726 {
4727     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4728 
4729     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4730     ctx->null_cond = cond_make_f();
4731     ctx->psw_n_nonzero = false;
4732     if (ctx->tb_flags & PSW_N) {
4733         ctx->null_cond.c = TCG_COND_ALWAYS;
4734         ctx->psw_n_nonzero = true;
4735     }
4736     ctx->null_lab = NULL;
4737 }
4738 
/* TranslatorOps hook: record both IA queue entries at each insn start,
   so cpu_restore_state can recover them (see restore_state_to_opc).  */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
4745 
/* TranslatorOps hook: emit a debug exception for a breakpoint.  The
   generic translator requires pc_next to advance so the breakpoint
   insn is accounted as occupying this TB.  */
static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
    ctx->base.pc_next += 4;
    return true;
}
4755 
/* TranslatorOps hook: translate a single instruction, maintaining the
   two-entry instruction-address queue (IAOQ front/back) and the
   nullification state across the insn boundary.  */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        /* User-only "page zero" contains the emulated syscall gateways.  */
        ret = do_page_zero(ctx);
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            /* Back of the queue is only known at runtime; compute
               IAOQ_N dynamically as IAOQ_B + 4.  */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Insn is unconditionally nullified: skip translation and
               clear the nullification for the following insn.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            ret = translate_one(ctx, insn);
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Known taken/not-taken: chain directly to the next TB.  */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ret = DISAS_NORETURN;
        } else {
            ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.is_jmp = ret;
    ctx->base.pc_next += 4;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        /* Front of queue unknown at translation time: shift the
           runtime queue registers along.  */
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}
4842 
/* TranslatorOps hook: emit the TB epilogue according to how the
   translation loop ended.  */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        /* TB already ended with an exception or goto_tb.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* Flush the translation-time IA queue and PSW[N] back to env
           before leaving the TB.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            /* Return to the main loop (e.g. for interrupt checks).  */
            tcg_gen_exit_tb(0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
4871 
4872 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4873 {
4874     target_ulong pc = dcbase->pc_first;
4875 
4876 #ifdef CONFIG_USER_ONLY
4877     switch (pc) {
4878     case 0x00:
4879         qemu_log("IN:\n0x00000000:  (null)\n");
4880         return;
4881     case 0xb0:
4882         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4883         return;
4884     case 0xe0:
4885         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4886         return;
4887     case 0x100:
4888         qemu_log("IN:\n0x00000100:  syscall\n");
4889         return;
4890     }
4891 #endif
4892 
4893     qemu_log("IN: %s\n", lookup_symbol(pc));
4894     log_target_disas(cs, pc, dcbase->tb->size);
4895 }
4896 
/* Hook table consumed by the generic translator_loop.  */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
4906 
4907 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4908 
4909 {
4910     DisasContext ctx;
4911     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
4912 }
4913 
/* Rebuild the IA queue from the data recorded by tcg_gen_insn_start.
   data[0] is IAOQ_F; data[1] is IAOQ_B, with (target_ureg)-1 meaning
   "unknown at translation time" (leave the runtime value alone).  */
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}
4926