xref: /openbmc/qemu/target/hppa/translate.c (revision 073d9f2c)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
32 
33 /* Since we have a distinction between register size and address size,
34    we need to redefine all of these.  */
35 
36 #undef TCGv
37 #undef tcg_temp_new
38 #undef tcg_global_reg_new
39 #undef tcg_global_mem_new
40 #undef tcg_temp_local_new
41 #undef tcg_temp_free
42 
43 #if TARGET_LONG_BITS == 64
44 #define TCGv_tl              TCGv_i64
45 #define tcg_temp_new_tl      tcg_temp_new_i64
46 #define tcg_temp_free_tl     tcg_temp_free_i64
47 #if TARGET_REGISTER_BITS == 64
48 #define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
49 #else
50 #define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
51 #endif
52 #else
53 #define TCGv_tl              TCGv_i32
54 #define tcg_temp_new_tl      tcg_temp_new_i32
55 #define tcg_temp_free_tl     tcg_temp_free_i32
56 #define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
57 #endif
58 
59 #if TARGET_REGISTER_BITS == 64
60 #define TCGv_reg             TCGv_i64
61 
62 #define tcg_temp_new         tcg_temp_new_i64
63 #define tcg_global_reg_new   tcg_global_reg_new_i64
64 #define tcg_global_mem_new   tcg_global_mem_new_i64
65 #define tcg_temp_local_new   tcg_temp_local_new_i64
66 #define tcg_temp_free        tcg_temp_free_i64
67 
68 #define tcg_gen_movi_reg     tcg_gen_movi_i64
69 #define tcg_gen_mov_reg      tcg_gen_mov_i64
70 #define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
71 #define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
72 #define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
73 #define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
74 #define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
75 #define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
76 #define tcg_gen_ld_reg       tcg_gen_ld_i64
77 #define tcg_gen_st8_reg      tcg_gen_st8_i64
78 #define tcg_gen_st16_reg     tcg_gen_st16_i64
79 #define tcg_gen_st32_reg     tcg_gen_st32_i64
80 #define tcg_gen_st_reg       tcg_gen_st_i64
81 #define tcg_gen_add_reg      tcg_gen_add_i64
82 #define tcg_gen_addi_reg     tcg_gen_addi_i64
83 #define tcg_gen_sub_reg      tcg_gen_sub_i64
84 #define tcg_gen_neg_reg      tcg_gen_neg_i64
85 #define tcg_gen_subfi_reg    tcg_gen_subfi_i64
86 #define tcg_gen_subi_reg     tcg_gen_subi_i64
87 #define tcg_gen_and_reg      tcg_gen_and_i64
88 #define tcg_gen_andi_reg     tcg_gen_andi_i64
89 #define tcg_gen_or_reg       tcg_gen_or_i64
90 #define tcg_gen_ori_reg      tcg_gen_ori_i64
91 #define tcg_gen_xor_reg      tcg_gen_xor_i64
92 #define tcg_gen_xori_reg     tcg_gen_xori_i64
93 #define tcg_gen_not_reg      tcg_gen_not_i64
94 #define tcg_gen_shl_reg      tcg_gen_shl_i64
95 #define tcg_gen_shli_reg     tcg_gen_shli_i64
96 #define tcg_gen_shr_reg      tcg_gen_shr_i64
97 #define tcg_gen_shri_reg     tcg_gen_shri_i64
98 #define tcg_gen_sar_reg      tcg_gen_sar_i64
99 #define tcg_gen_sari_reg     tcg_gen_sari_i64
100 #define tcg_gen_brcond_reg   tcg_gen_brcond_i64
101 #define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
102 #define tcg_gen_setcond_reg  tcg_gen_setcond_i64
103 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
104 #define tcg_gen_mul_reg      tcg_gen_mul_i64
105 #define tcg_gen_muli_reg     tcg_gen_muli_i64
106 #define tcg_gen_div_reg      tcg_gen_div_i64
107 #define tcg_gen_rem_reg      tcg_gen_rem_i64
108 #define tcg_gen_divu_reg     tcg_gen_divu_i64
109 #define tcg_gen_remu_reg     tcg_gen_remu_i64
110 #define tcg_gen_discard_reg  tcg_gen_discard_i64
111 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
112 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
113 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
114 #define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
115 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
116 #define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
117 #define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
118 #define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
119 #define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
120 #define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
121 #define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
122 #define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
123 #define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
124 #define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
125 #define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
126 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
127 #define tcg_gen_andc_reg     tcg_gen_andc_i64
128 #define tcg_gen_eqv_reg      tcg_gen_eqv_i64
129 #define tcg_gen_nand_reg     tcg_gen_nand_i64
130 #define tcg_gen_nor_reg      tcg_gen_nor_i64
131 #define tcg_gen_orc_reg      tcg_gen_orc_i64
132 #define tcg_gen_clz_reg      tcg_gen_clz_i64
133 #define tcg_gen_ctz_reg      tcg_gen_ctz_i64
134 #define tcg_gen_clzi_reg     tcg_gen_clzi_i64
135 #define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
136 #define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
137 #define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
138 #define tcg_gen_rotl_reg     tcg_gen_rotl_i64
139 #define tcg_gen_rotli_reg    tcg_gen_rotli_i64
140 #define tcg_gen_rotr_reg     tcg_gen_rotr_i64
141 #define tcg_gen_rotri_reg    tcg_gen_rotri_i64
142 #define tcg_gen_deposit_reg  tcg_gen_deposit_i64
143 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
144 #define tcg_gen_extract_reg  tcg_gen_extract_i64
145 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
146 #define tcg_const_reg        tcg_const_i64
147 #define tcg_const_local_reg  tcg_const_local_i64
148 #define tcg_gen_movcond_reg  tcg_gen_movcond_i64
149 #define tcg_gen_add2_reg     tcg_gen_add2_i64
150 #define tcg_gen_sub2_reg     tcg_gen_sub2_i64
151 #define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
152 #define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
153 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
154 #define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
155 #else
156 #define TCGv_reg             TCGv_i32
157 #define tcg_temp_new         tcg_temp_new_i32
158 #define tcg_global_reg_new   tcg_global_reg_new_i32
159 #define tcg_global_mem_new   tcg_global_mem_new_i32
160 #define tcg_temp_local_new   tcg_temp_local_new_i32
161 #define tcg_temp_free        tcg_temp_free_i32
162 
163 #define tcg_gen_movi_reg     tcg_gen_movi_i32
164 #define tcg_gen_mov_reg      tcg_gen_mov_i32
165 #define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
166 #define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
167 #define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
168 #define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
169 #define tcg_gen_ld32u_reg    tcg_gen_ld_i32
170 #define tcg_gen_ld32s_reg    tcg_gen_ld_i32
171 #define tcg_gen_ld_reg       tcg_gen_ld_i32
172 #define tcg_gen_st8_reg      tcg_gen_st8_i32
173 #define tcg_gen_st16_reg     tcg_gen_st16_i32
174 #define tcg_gen_st32_reg     tcg_gen_st32_i32
175 #define tcg_gen_st_reg       tcg_gen_st_i32
176 #define tcg_gen_add_reg      tcg_gen_add_i32
177 #define tcg_gen_addi_reg     tcg_gen_addi_i32
178 #define tcg_gen_sub_reg      tcg_gen_sub_i32
179 #define tcg_gen_neg_reg      tcg_gen_neg_i32
180 #define tcg_gen_subfi_reg    tcg_gen_subfi_i32
181 #define tcg_gen_subi_reg     tcg_gen_subi_i32
182 #define tcg_gen_and_reg      tcg_gen_and_i32
183 #define tcg_gen_andi_reg     tcg_gen_andi_i32
184 #define tcg_gen_or_reg       tcg_gen_or_i32
185 #define tcg_gen_ori_reg      tcg_gen_ori_i32
186 #define tcg_gen_xor_reg      tcg_gen_xor_i32
187 #define tcg_gen_xori_reg     tcg_gen_xori_i32
188 #define tcg_gen_not_reg      tcg_gen_not_i32
189 #define tcg_gen_shl_reg      tcg_gen_shl_i32
190 #define tcg_gen_shli_reg     tcg_gen_shli_i32
191 #define tcg_gen_shr_reg      tcg_gen_shr_i32
192 #define tcg_gen_shri_reg     tcg_gen_shri_i32
193 #define tcg_gen_sar_reg      tcg_gen_sar_i32
194 #define tcg_gen_sari_reg     tcg_gen_sari_i32
195 #define tcg_gen_brcond_reg   tcg_gen_brcond_i32
196 #define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
197 #define tcg_gen_setcond_reg  tcg_gen_setcond_i32
198 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
199 #define tcg_gen_mul_reg      tcg_gen_mul_i32
200 #define tcg_gen_muli_reg     tcg_gen_muli_i32
201 #define tcg_gen_div_reg      tcg_gen_div_i32
202 #define tcg_gen_rem_reg      tcg_gen_rem_i32
203 #define tcg_gen_divu_reg     tcg_gen_divu_i32
204 #define tcg_gen_remu_reg     tcg_gen_remu_i32
205 #define tcg_gen_discard_reg  tcg_gen_discard_i32
206 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
207 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
208 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
209 #define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
210 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
211 #define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
212 #define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
213 #define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
214 #define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
215 #define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
216 #define tcg_gen_ext32u_reg   tcg_gen_mov_i32
217 #define tcg_gen_ext32s_reg   tcg_gen_mov_i32
218 #define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
219 #define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
220 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
221 #define tcg_gen_andc_reg     tcg_gen_andc_i32
222 #define tcg_gen_eqv_reg      tcg_gen_eqv_i32
223 #define tcg_gen_nand_reg     tcg_gen_nand_i32
224 #define tcg_gen_nor_reg      tcg_gen_nor_i32
225 #define tcg_gen_orc_reg      tcg_gen_orc_i32
226 #define tcg_gen_clz_reg      tcg_gen_clz_i32
227 #define tcg_gen_ctz_reg      tcg_gen_ctz_i32
228 #define tcg_gen_clzi_reg     tcg_gen_clzi_i32
229 #define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
230 #define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
231 #define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
232 #define tcg_gen_rotl_reg     tcg_gen_rotl_i32
233 #define tcg_gen_rotli_reg    tcg_gen_rotli_i32
234 #define tcg_gen_rotr_reg     tcg_gen_rotr_i32
235 #define tcg_gen_rotri_reg    tcg_gen_rotri_i32
236 #define tcg_gen_deposit_reg  tcg_gen_deposit_i32
237 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
238 #define tcg_gen_extract_reg  tcg_gen_extract_i32
239 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
240 #define tcg_const_reg        tcg_const_i32
241 #define tcg_const_local_reg  tcg_const_local_i32
242 #define tcg_gen_movcond_reg  tcg_gen_movcond_i32
243 #define tcg_gen_add2_reg     tcg_gen_add2_i32
244 #define tcg_gen_sub2_reg     tcg_gen_sub2_i32
245 #define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
246 #define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
247 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
248 #define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
249 #endif /* TARGET_REGISTER_BITS */
250 
/* A deferred comparison: the condition c(a0, a1) decides whether the
   current/next insn is nullified.  See cond_prep/cond_free for the
   lifetime rules encoded by the two flags.  */
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;   /* a0 aliases the cpu_psw_n global; do not free it */
    bool a1_is_0;   /* a1 is an implicit constant zero, not yet allocated */
} DisasCond;
257 
/* Per-translation-block state threaded through the decoder.  */
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;      /* instruction address offset queue: front */
    target_ureg iaoq_b;      /* instruction address offset queue: back */
    target_ureg iaoq_n;      /* address of the next insn; -1 if variable */
    TCGv_reg iaoq_n_var;     /* holds the next address when iaoq_n == -1
                                -- see copy_iaoq_entry/gen_goto_tb */

    /* Temporaries allocated for the current insn, released per-insn
       (release site not visible in this chunk).  */
    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;     /* pending nullification condition */
    TCGLabel *null_lab;      /* branch target skipping a nullified insn */

    uint32_t insn;           /* raw instruction word being translated */
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;           /* current privilege level; 0 is most privileged */
    bool psw_n_nonzero;      /* true if cpu_psw_n may currently be nonzero */
} DisasContext;
280 
281 /* Target-specific return values from translate_one, indicating the
282    state of the TB.  Note that DISAS_NEXT indicates that we are not
283    exiting the TB.  */
284 
285 /* We are not using a goto_tb (for whatever reason), but have updated
286    the iaq (for whatever reason), so don't do it again on exit.  */
287 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
288 
289 /* We are exiting the TB, but have neither emitted a goto_tb, nor
290    updated the iaq for the next instruction to be executed.  */
291 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
292 
293 /* Similarly, but we want to return to the main loop immediately
294    to recognize unmasked interrupts.  */
295 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
296 
/* One entry of the instruction decode table: TRANS is invoked when
   (raw_insn & mask) == insn.  The union F carries an optional helper
   callback; the member names apparently encode the operand signature
   (t=target reg, w=i32 word, d=i64 doubleword, e=env) -- confirm
   against the table initializers elsewhere in the file.  */
typedef struct DisasInsn {
    uint32_t insn, mask;
    DisasJumpType (*trans)(DisasContext *ctx, uint32_t insn,
                           const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;
311 
/* global register indexes */
static TCGv_reg cpu_gr[32];       /* general registers; [0] stays NULL (GR0 == 0) */
static TCGv_i64 cpu_sr[4];        /* space registers 0-3 */
static TCGv_i64 cpu_srH;          /* backed by env->sr[4]; cache used when
                                     TB_FLAG_SR_SAME is set (see load_spr) */
static TCGv_reg cpu_iaoq_f;       /* IA offset queue front */
static TCGv_reg cpu_iaoq_b;       /* IA offset queue back */
static TCGv_i64 cpu_iasq_f;       /* IA space queue front */
static TCGv_i64 cpu_iasq_b;       /* IA space queue back */
static TCGv_reg cpu_sar;          /* shift amount register (cr[CR_SAR]) */
static TCGv_reg cpu_psw_n;        /* PSW nullify bit */
static TCGv_reg cpu_psw_v;        /* PSW overflow */
static TCGv_reg cpu_psw_cb;       /* PSW carry/borrow bits */
static TCGv_reg cpu_psw_cb_msb;   /* most significant carry/borrow bit */
325 
326 #include "exec/gen-icount.h"
327 
/* Allocate the TCG globals that mirror CPUHPPAState fields used by
   generated code: general registers, space registers, IA queues, and
   the decomposed PSW bits.  */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        /* SAR lives in the control register file, not a direct field.  */
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* GR0 is treated as constant zero (see load_gpr); no backing global.  */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    /* srH is backed by env->sr[4]; used via TB_FLAG_SR_SAME in load_spr.  */
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
386 
387 static DisasCond cond_make_f(void)
388 {
389     return (DisasCond){
390         .c = TCG_COND_NEVER,
391         .a0 = NULL,
392         .a1 = NULL,
393     };
394 }
395 
396 static DisasCond cond_make_n(void)
397 {
398     return (DisasCond){
399         .c = TCG_COND_NE,
400         .a0 = cpu_psw_n,
401         .a0_is_n = true,
402         .a1 = NULL,
403         .a1_is_0 = true
404     };
405 }
406 
407 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
408 {
409     DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };
410 
411     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
412     r.a0 = tcg_temp_new();
413     tcg_gen_mov_reg(r.a0, a0);
414 
415     return r;
416 }
417 
418 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
419 {
420     DisasCond r = { .c = c };
421 
422     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
423     r.a0 = tcg_temp_new();
424     tcg_gen_mov_reg(r.a0, a0);
425     r.a1 = tcg_temp_new();
426     tcg_gen_mov_reg(r.a1, a1);
427 
428     return r;
429 }
430 
431 static void cond_prep(DisasCond *cond)
432 {
433     if (cond->a1_is_0) {
434         cond->a1_is_0 = false;
435         cond->a1 = tcg_const_reg(0);
436     }
437 }
438 
/* Release the temporaries held by *cond and reset it to "never".
   Operands that alias the psw_n global (a0_is_n) or the implicit
   zero (a1_is_0) were never allocated here and must not be freed.  */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        /* Already free; nothing to do.  */
        break;
    }
}
461 
462 static TCGv_reg get_temp(DisasContext *ctx)
463 {
464     unsigned i = ctx->ntempr++;
465     g_assert(i < ARRAY_SIZE(ctx->tempr));
466     return ctx->tempr[i] = tcg_temp_new();
467 }
468 
#ifndef CONFIG_USER_ONLY
/* As get_temp, but address-width (TCGv_tl); tracked in ctx->templ.
   Only compiled for system mode, where register and address sizes
   can differ (see the macro block at the top of the file).  */
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif
477 
478 static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
479 {
480     TCGv_reg t = get_temp(ctx);
481     tcg_gen_movi_reg(t, v);
482     return t;
483 }
484 
485 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
486 {
487     if (reg == 0) {
488         TCGv_reg t = get_temp(ctx);
489         tcg_gen_movi_reg(t, 0);
490         return t;
491     } else {
492         return cpu_gr[reg];
493     }
494 }
495 
496 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
497 {
498     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
499         return get_temp(ctx);
500     } else {
501         return cpu_gr[reg];
502     }
503 }
504 
/* Copy T into DEST, honoring nullification: if a null condition is
   pending, emit a conditional move that keeps DEST's old value on the
   nullified path; otherwise a plain move.  */
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* Ensure both condition operands are materialized for movcond.  */
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                           ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}
515 
516 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
517 {
518     if (reg != 0) {
519         save_or_nullify(ctx, cpu_gr[reg], t);
520     }
521 }
522 
/* Byte offsets of the two 32-bit words inside a 64-bit fr[] element,
   as addressed by tcg_gen_ld_i32/st_i32 on this host.  */
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif
530 
/* Load single-precision FP register RT into a new i32 temp.  fr[]
   elements are 64-bit; bit 5 of RT selects which 32-bit half is used
   (RT >= 32 selects LO_OFS) -- presumably the PA "L" register halves;
   confirm against the FP register numbering used by the decoder.  */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}
539 
540 static TCGv_i32 load_frw0_i32(unsigned rt)
541 {
542     if (rt == 0) {
543         return tcg_const_i32(0);
544     } else {
545         return load_frw_i32(rt);
546     }
547 }
548 
/* Load single-precision FP register RT zero-extended into a new i64
   temp; register 0 reads as constant zero.  Half selection matches
   load_frw_i32.  */
static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}
561 
/* Store VAL to the 32-bit half of FP register RT (half selection as in
   load_frw_i32).  Does not free VAL; caller retains ownership.  */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}
568 
569 #undef HI_OFS
570 #undef LO_OFS
571 
/* Load the full 64-bit (double-precision) FP register RT into a new
   i64 temp.  */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}
578 
579 static TCGv_i64 load_frd0(unsigned rt)
580 {
581     if (rt == 0) {
582         return tcg_const_i64(0);
583     } else {
584         return load_frd(rt);
585     }
586 }
587 
/* Store VAL to the full 64-bit FP register RT.  Caller retains
   ownership of VAL.  */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
592 
/* Load space register REG into DEST.  User-only emulation has a flat
   address space, so spaces are always zero.  SR0-3 are TCG globals;
   SR4-7 are read from env, except when TB_FLAG_SR_SAME is set -- which
   apparently means SR4-7 all hold one value, cached in the srH global
   (it is backed by env->sr[4]).  */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
607 
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.
   Emits a forward branch to ctx->null_lab; nullify_end closes it.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        /* Branch past the insn body when the condition holds.  */
        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                          ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
638 
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: PSW[N] must be 0; store only if it may not be.  */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    /* If a0 already *is* psw_n, the value is in place; otherwise
       evaluate the condition into the global.  */
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                           ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
656 
657 /* Set a PSW[N] to X.  The intention is that this is used immediately
658    before a goto_tb/exit_tb, so that there is no fallthru path to other
659    code within the TB.  Therefore we do not update psw_n_nonzero.  */
660 static void nullify_set(DisasContext *ctx, bool x)
661 {
662     if (ctx->psw_n_nonzero || x) {
663         tcg_gen_movi_reg(cpu_psw_n, x);
664     }
665 }
666 
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Returns the (possibly adjusted)
   disassembly status for the insn.  */
static DisasJumpType nullify_end(DisasContext *ctx, DisasJumpType status)
{
    TCGLabel *null_lab = ctx->null_lab;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return status;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    /* A nullified NORETURN insn (e.g. a trap) still falls through to
       the next insn on the skip path.  */
    if (status == DISAS_NORETURN) {
        status = DISAS_NEXT;
    }
    return status;
}
703 
704 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
705 {
706     if (unlikely(ival == -1)) {
707         tcg_gen_mov_reg(dest, vval);
708     } else {
709         tcg_gen_movi_reg(dest, ival);
710     }
711 }
712 
/* Compute the target of a branch with displacement DISP.  The +8 skips
   the current insn and the one in the back of the queue -- standard PA
   branch addressing relative to IAOQ_Front.  */
static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}
717 
718 static void gen_excp_1(int exception)
719 {
720     TCGv_i32 t = tcg_const_i32(exception);
721     gen_helper_excp(cpu_env, t);
722     tcg_temp_free_i32(t);
723 }
724 
/* Raise EXCEPTION from the current insn: synchronize the IAOQ globals
   and PSW[N] with translation-time state first, since the helper does
   not return to this TB (hence DISAS_NORETURN).  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    return DISAS_NORETURN;
}
733 
/* As gen_excp, but first latch the raw instruction word into CR_IIR
   (the interruption instruction register) for the OS trap handler.  */
static DisasJumpType gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    return gen_excp(ctx, exc);
}
741 
/* Raise an illegal-instruction trap, respecting nullification: if the
   insn is nullified, the trap is skipped (nullify_over/nullify_end).  */
static DisasJumpType gen_illegal(DisasContext *ctx)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp_iir(ctx, EXCP_ILL));
}
747 
/* Privilege guard for trans_* functions: unless running at the most
   privileged level (0), raise EXCP (nullification-aware) and *return*
   from the enclosing function.  */
#define CHECK_MOST_PRIVILEGED(EXCP)                               \
    do {                                                          \
        if (ctx->privilege != 0) {                                \
            nullify_over(ctx);                                    \
            return nullify_end(ctx, gen_excp_iir(ctx, EXCP));     \
        }                                                         \
    } while (0)
755 
/* Decide whether a direct goto_tb may be used for a jump to DEST.
   Disallowed when single-stepping or when this TB ends in an I/O insn
   (CF_LAST_IO).  NOTE(review): DEST is currently unused here -- no
   same-page restriction is applied; confirm whether that is intended
   for this target.  */
static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-steping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}
764 
765 /* If the next insn is to be nullified, and it's on the same page,
766    and we're not attempting to set a breakpoint on it, then we can
767    totally skip the nullified insn.  This avoids creating and
768    executing a TB that merely branches to the next TB.  */
769 static bool use_nullify_skip(DisasContext *ctx)
770 {
771     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
772             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
773 }
774 
/* Emit the end-of-TB transfer to queue state (F, B), using goto_tb
   slot WHICH when both addresses are translation-time constants and
   chaining is permitted; otherwise update the IAOQ globals and take
   the indirect path (or trap to the debugger when single-stepping).  */
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        /* -1 means "variable"; fall back to the runtime values.  */
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}
793 
794 /* PA has a habit of taking the LSB of a field and using that as the sign,
795    with the rest of the field becoming the least significant bits.  */
796 static target_sreg low_sextract(uint32_t val, int pos, int len)
797 {
798     target_ureg x = -(target_ureg)extract32(val, pos, 1);
799     x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
800     return x;
801 }
802 
803 static unsigned assemble_rt64(uint32_t insn)
804 {
805     unsigned r1 = extract32(insn, 6, 1);
806     unsigned r0 = extract32(insn, 0, 5);
807     return r1 * 32 + r0;
808 }
809 
810 static unsigned assemble_ra64(uint32_t insn)
811 {
812     unsigned r1 = extract32(insn, 7, 1);
813     unsigned r0 = extract32(insn, 21, 5);
814     return r1 * 32 + r0;
815 }
816 
817 static unsigned assemble_rb64(uint32_t insn)
818 {
819     unsigned r1 = extract32(insn, 12, 1);
820     unsigned r0 = extract32(insn, 16, 5);
821     return r1 * 32 + r0;
822 }
823 
824 static unsigned assemble_rc64(uint32_t insn)
825 {
826     unsigned r2 = extract32(insn, 8, 1);
827     unsigned r1 = extract32(insn, 13, 3);
828     unsigned r0 = extract32(insn, 9, 2);
829     return r2 * 32 + r1 * 4 + r0;
830 }
831 
832 static unsigned assemble_sr3(uint32_t insn)
833 {
834     unsigned s2 = extract32(insn, 13, 1);
835     unsigned s0 = extract32(insn, 14, 2);
836     return s2 * 4 + s0;
837 }
838 
/* Assemble a 12-bit signed field: sign in insn[0] (PA low-bit-sign
   convention, cf. low_sextract), then insn[2], then insn[3:13].  */
static target_sreg assemble_12(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x <<  1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}
846 
/* Assemble a 16-bit signed displacement field.  */
static target_sreg assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}
854 
/* Assemble a word-scaled signed displacement field.  */
static target_sreg assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;   /* scale by instruction-word size */
}
864 
/* Assemble a 17-bit branch displacement, scaled by 4: sign in insn[0],
   then insn[16:21], insn[2], insn[3:13].  */
static target_sreg assemble_17(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x <<  5) | extract32(insn, 16, 5);
    x = (x <<  1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}
873 
/* Assemble the 21-bit immediate (LDIL/ADDIL style), left-justified
   into the upper bits of the result: sign in insn[0], then insn[1:12],
   insn[14:16], insn[16:21], insn[12:14], all shifted left 11.  */
static target_sreg assemble_21(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x <<  2) | extract32(insn, 14, 2);
    x = (x <<  5) | extract32(insn, 16, 5);
    x = (x <<  2) | extract32(insn, 12, 2);
    return x << 11;
}
883 
884 static target_sreg assemble_22(uint32_t insn)
885 {
886     target_ureg x = -(target_ureg)(insn & 1);
887     x = (x << 10) | extract32(insn, 16, 10);
888     x = (x <<  1) | extract32(insn, 2, 1);
889     x = (x << 10) | extract32(insn, 3, 10);
890     return x << 2;
891 }
892 
893 /* The parisc documentation describes only the general interpretation of
894    the conditions, without describing their exact implementation.  The
895    interpretations do not stand up well when considering ADD,C and SUB,B.
896    However, considering the Addition, Subtraction and Logical conditions
897    as a whole it would appear that these relations are similar to what
898    a traditional NZCV set of flags would produce.  */
899 
/* Build the DisasCond for arithmetic condition field CF: bits 3:1
   select the relation, bit 0 negates it.  RES is the primary result,
   CB_MSB the msb of the carry-out word, SV the signed-overflow
   indicator.  Callers pass NULL for the inputs a given CF does not
   reference (see do_add/do_sub).  */
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);       /* tmp = C ? -1 : 0 */
        tcg_gen_and_reg(tmp, tmp, res);     /* tmp = C ? res : 0 */
        cond = cond_make_0(TCG_COND_EQ, tmp);   /* zero iff !C | Z */
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);      /* test the low (odd) bit */
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    /* Bit 0 of CF selects the negated sense of the condition.  */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
947 
948 /* Similar, but for the special case of subtraction without borrow, we
949    can use the inputs directly.  This can allow other computation to be
950    deleted as unused.  */
951 
/* Build the DisasCond for a subtraction RES = IN1 - IN2 (no borrow).
   The ordering relations compare the inputs directly instead of the
   result, which can let the subtraction itself be removed as dead.  */
static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        /* Cases 0, 6, 7: fall back to the generic conditions.  SV
           stands in for the unused CB_MSB argument; only cases 4-5
           of do_cond read it, and those are handled above.  */
        return do_cond(cf, res, sv, sv);
    }
    /* Bit 0 of CF selects the negated sense of the condition.  */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
982 
983 /* Similar, but for logicals, where the carry and overflow bits are not
984    computed, and use of them is undefined.  */
985 
986 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
987 {
988     switch (cf >> 1) {
989     case 4: case 5: case 6:
990         cf &= 1;
991         break;
992     }
993     return do_cond(cf, res, res, res);
994 }
995 
996 /* Similar, but for shift/extract/deposit conditions.  */
997 
998 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
999 {
1000     unsigned c, f;
1001 
1002     /* Convert the compressed condition codes to standard.
1003        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1004        4-7 are the reverse of 0-3.  */
1005     c = orig & 3;
1006     if (c == 3) {
1007         c = 7;
1008     }
1009     f = (orig & 4) / 4;
1010 
1011     return do_log_cond(c * 2 + f, res);
1012 }
1013 
1014 /* Similar, but for unit conditions.  */
1015 
/* Build the DisasCond for unit condition CF against RES.  Bit 3 of CF
   selects the "digit/byte/halfword carry" conditions, which need the
   original operands IN1/IN2 to reconstruct the carry-out vector.  */
static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        /* cb = (in1 & in2) | ((in1 | in2) & ~res): the carry-out of
           each bit position.  */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    /* NOTE(review): the masks below cover only 32 bits; they would
       need widening if TARGET_REGISTER_BITS were 64.  */
    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        /* Same trick with halfword granularity.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        /* Any carry out of a 4-bit digit.  */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        /* Any carry out of a byte.  */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        /* Any carry out of a halfword.  */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    /* Bit 0 of CF selects the negated sense of the condition.  */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
1091 
1092 /* Compute signed overflow for addition.  */
1093 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1094                           TCGv_reg in1, TCGv_reg in2)
1095 {
1096     TCGv_reg sv = get_temp(ctx);
1097     TCGv_reg tmp = tcg_temp_new();
1098 
1099     tcg_gen_xor_reg(sv, res, in1);
1100     tcg_gen_xor_reg(tmp, in1, in2);
1101     tcg_gen_andc_reg(sv, sv, tmp);
1102     tcg_temp_free(tmp);
1103 
1104     return sv;
1105 }
1106 
1107 /* Compute signed overflow for subtraction.  */
1108 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1109                           TCGv_reg in1, TCGv_reg in2)
1110 {
1111     TCGv_reg sv = get_temp(ctx);
1112     TCGv_reg tmp = tcg_temp_new();
1113 
1114     tcg_gen_xor_reg(sv, res, in1);
1115     tcg_gen_xor_reg(tmp, in1, in2);
1116     tcg_gen_and_reg(sv, sv, tmp);
1117     tcg_temp_free(tmp);
1118 
1119     return sv;
1120 }
1121 
/* Emit an addition RT = (IN1 << SHIFT) + IN2 with the PA variants:
     SHIFT   pre-shift IN1 (SHxADD),
     IS_L    "logical" add: do not write back the carry bits,
     IS_TSV  trap on signed overflow,
     IS_TC   trap when condition CF holds,
     IS_C    also add the PSW carry-in (ADD,C),
     CF      condition field used for nullification.  */
static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, unsigned shift, bool is_l,
                            bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    /* Compute the carry-out whenever it must be written back, and also
       for conditions 4/5, which test it (see do_cond).  */
    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            /* cb = in1 ^ in2 ^ dest recovers the per-bit carry-in
               vector.  */
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
1193 
/* Emit a subtraction RT = IN1 - IN2 with the PA variants:
     IS_TSV  trap on signed overflow,
     IS_B    subtract with borrow (consume the PSW carry-in),
     IS_TC   trap when condition CF holds,
     CF      condition field used for nullification.  */
static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                            TCGv_reg in2, bool is_tsv, bool is_b,
                            bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        /* cb = ~in2 ^ in1 ^ dest: the per-bit carry-in vector.  */
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        /* cb = ~(in1 ^ in2) ^ dest = in1 ^ ~in2 ^ dest.  */
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
1260 
/* CMPCLR: compare IN1 against IN2 under condition CF, then write zero
   to RT.  The comparison survives only in the nullification state.  */
static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                               TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
    return DISAS_NEXT;
}
1289 
1290 static DisasJumpType do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1291                             TCGv_reg in2, unsigned cf,
1292                             void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1293 {
1294     TCGv_reg dest = dest_gpr(ctx, rt);
1295 
1296     /* Perform the operation, and writeback.  */
1297     fn(dest, in1, in2);
1298     save_gpr(ctx, rt, dest);
1299 
1300     /* Install the new nullification.  */
1301     cond_free(&ctx->null_cond);
1302     if (cf) {
1303         ctx->null_cond = do_log_cond(cf, dest);
1304     }
1305     return DISAS_NEXT;
1306 }
1307 
/* Emit the unit operation FN on IN1/IN2 into RT, evaluating unit
   condition CF; IS_TC additionally traps when the condition holds.  */
static DisasJumpType do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                             TCGv_reg in2, unsigned cf, bool is_tc,
                             void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        /* No condition: write straight to the destination.  */
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        /* The condition needs the result and original operands, so
           compute into a temp before writeback.  */
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
    return DISAS_NEXT;
}
1340 
1341 #ifndef CONFIG_USER_ONLY
1342 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1343    from the top 2 bits of the base register.  There are a few system
1344    instructions that have a 3-bit space specifier, for which SR0 is
1345    not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        /* Explicit space register (negative encodes a 3-bit specifier
           where SR0 is not special; see the comment above).  */
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        /* All of SR4-SR7 are known equal; use the cached copy.  */
        return cpu_srH;
    }

    /* SP == 0: select SR4-SR7 from the top two bits of BASE.  */
    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    /* Keep the top two bits, pre-scaled by 8 (octal 030 == 0x18);
       each sr[] entry is 8 bytes, loaded with ld_i64 below.  */
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
1379 #endif
1380 
/* Form the guest virtual address for a memory access.  *PGVA receives
   the full address (including the space, for system mode), *POFS the
   offset value to be written back for base modification.  RB is the
   base register, RX an optional index scaled by 2^SCALE, DISP a
   displacement (mutually exclusive with RX).  MODIFY <= 0 addresses
   via the modified offset; MODIFY > 0 (post-modify) via the original
   base.  IS_PHYS skips space selection for physical accesses.  */
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        /* PSW W-bit set: mask the offset to 62 bits.  */
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
1415 
1416 /* Emit a memory load.  The modify parameter should be
1417  * < 0 for pre-modify,
1418  * > 0 for post-modify,
1419  * = 0 for no base register update.
1420  */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    /* Load a 32-bit value from [sp|rb + rx*2^scale + disp] into DEST,
       updating RB per MODIFY (<0 pre-, >0 post-, 0 none).  */
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1438 
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    /* As do_load_32, but loading a 64-bit value.  */
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1456 
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    /* Store the 32-bit SRC to [sp|rb + rx*2^scale + disp], updating RB
       per MODIFY (<0 pre-, >0 post-, 0 none).  */
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1474 
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    /* As do_store_32, but storing a 64-bit value.  */
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1492 
1493 #if TARGET_REGISTER_BITS == 64
1494 #define do_load_reg   do_load_64
1495 #define do_store_reg  do_store_64
1496 #else
1497 #define do_load_reg   do_load_32
1498 #define do_store_reg  do_store_32
1499 #endif
1500 
1501 static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1502                              unsigned rx, int scale, target_sreg disp,
1503                              unsigned sp, int modify, TCGMemOp mop)
1504 {
1505     TCGv_reg dest;
1506 
1507     nullify_over(ctx);
1508 
1509     if (modify == 0) {
1510         /* No base register update.  */
1511         dest = dest_gpr(ctx, rt);
1512     } else {
1513         /* Make sure if RT == RB, we see the result of the load.  */
1514         dest = get_temp(ctx);
1515     }
1516     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1517     save_gpr(ctx, rt, dest);
1518 
1519     return nullify_end(ctx, DISAS_NEXT);
1520 }
1521 
1522 static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1523                                unsigned rx, int scale, target_sreg disp,
1524                                unsigned sp, int modify)
1525 {
1526     TCGv_i32 tmp;
1527 
1528     nullify_over(ctx);
1529 
1530     tmp = tcg_temp_new_i32();
1531     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1532     save_frw_i32(rt, tmp);
1533     tcg_temp_free_i32(tmp);
1534 
1535     if (rt == 0) {
1536         gen_helper_loaded_fr0(cpu_env);
1537     }
1538 
1539     return nullify_end(ctx, DISAS_NEXT);
1540 }
1541 
1542 static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1543                                unsigned rx, int scale, target_sreg disp,
1544                                unsigned sp, int modify)
1545 {
1546     TCGv_i64 tmp;
1547 
1548     nullify_over(ctx);
1549 
1550     tmp = tcg_temp_new_i64();
1551     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1552     save_frd(rt, tmp);
1553     tcg_temp_free_i64(tmp);
1554 
1555     if (rt == 0) {
1556         gen_helper_loaded_fr0(cpu_env);
1557     }
1558 
1559     return nullify_end(ctx, DISAS_NEXT);
1560 }
1561 
1562 static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1563                               target_sreg disp, unsigned sp,
1564                               int modify, TCGMemOp mop)
1565 {
1566     nullify_over(ctx);
1567     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1568     return nullify_end(ctx, DISAS_NEXT);
1569 }
1570 
1571 static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1572                                 unsigned rx, int scale, target_sreg disp,
1573                                 unsigned sp, int modify)
1574 {
1575     TCGv_i32 tmp;
1576 
1577     nullify_over(ctx);
1578 
1579     tmp = load_frw_i32(rt);
1580     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1581     tcg_temp_free_i32(tmp);
1582 
1583     return nullify_end(ctx, DISAS_NEXT);
1584 }
1585 
1586 static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1587                                 unsigned rx, int scale, target_sreg disp,
1588                                 unsigned sp, int modify)
1589 {
1590     TCGv_i64 tmp;
1591 
1592     nullify_over(ctx);
1593 
1594     tmp = load_frd(rt);
1595     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
1596     tcg_temp_free_i64(tmp);
1597 
1598     return nullify_end(ctx, DISAS_NEXT);
1599 }
1600 
1601 static DisasJumpType do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1602                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1603 {
1604     TCGv_i32 tmp;
1605 
1606     nullify_over(ctx);
1607     tmp = load_frw0_i32(ra);
1608 
1609     func(tmp, cpu_env, tmp);
1610 
1611     save_frw_i32(rt, tmp);
1612     tcg_temp_free_i32(tmp);
1613     return nullify_end(ctx, DISAS_NEXT);
1614 }
1615 
1616 static DisasJumpType do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1617                                 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1618 {
1619     TCGv_i32 dst;
1620     TCGv_i64 src;
1621 
1622     nullify_over(ctx);
1623     src = load_frd(ra);
1624     dst = tcg_temp_new_i32();
1625 
1626     func(dst, cpu_env, src);
1627 
1628     tcg_temp_free_i64(src);
1629     save_frw_i32(rt, dst);
1630     tcg_temp_free_i32(dst);
1631     return nullify_end(ctx, DISAS_NEXT);
1632 }
1633 
1634 static DisasJumpType do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1635                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1636 {
1637     TCGv_i64 tmp;
1638 
1639     nullify_over(ctx);
1640     tmp = load_frd0(ra);
1641 
1642     func(tmp, cpu_env, tmp);
1643 
1644     save_frd(rt, tmp);
1645     tcg_temp_free_i64(tmp);
1646     return nullify_end(ctx, DISAS_NEXT);
1647 }
1648 
1649 static DisasJumpType do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1650                                 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1651 {
1652     TCGv_i32 src;
1653     TCGv_i64 dst;
1654 
1655     nullify_over(ctx);
1656     src = load_frw0_i32(ra);
1657     dst = tcg_temp_new_i64();
1658 
1659     func(dst, cpu_env, src);
1660 
1661     tcg_temp_free_i32(src);
1662     save_frd(rt, dst);
1663     tcg_temp_free_i64(dst);
1664     return nullify_end(ctx, DISAS_NEXT);
1665 }
1666 
1667 static DisasJumpType do_fop_weww(DisasContext *ctx, unsigned rt,
1668                                  unsigned ra, unsigned rb,
1669                                  void (*func)(TCGv_i32, TCGv_env,
1670                                               TCGv_i32, TCGv_i32))
1671 {
1672     TCGv_i32 a, b;
1673 
1674     nullify_over(ctx);
1675     a = load_frw0_i32(ra);
1676     b = load_frw0_i32(rb);
1677 
1678     func(a, cpu_env, a, b);
1679 
1680     tcg_temp_free_i32(b);
1681     save_frw_i32(rt, a);
1682     tcg_temp_free_i32(a);
1683     return nullify_end(ctx, DISAS_NEXT);
1684 }
1685 
1686 static DisasJumpType do_fop_dedd(DisasContext *ctx, unsigned rt,
1687                                  unsigned ra, unsigned rb,
1688                                  void (*func)(TCGv_i64, TCGv_env,
1689                                               TCGv_i64, TCGv_i64))
1690 {
1691     TCGv_i64 a, b;
1692 
1693     nullify_over(ctx);
1694     a = load_frd0(ra);
1695     b = load_frd0(rb);
1696 
1697     func(a, cpu_env, a, b);
1698 
1699     tcg_temp_free_i64(b);
1700     save_frd(rt, a);
1701     tcg_temp_free_i64(a);
1702     return nullify_end(ctx, DISAS_NEXT);
1703 }
1704 
1705 /* Emit an unconditional branch to a direct target, which may or may not
1706    have already had nullification handled.  */
/* DEST is the absolute target address, LINK the register to receive
   the return address (0 for none), IS_N the nullify-next bit.  */
static DisasJumpType do_dbranch(DisasContext *ctx, target_ureg dest,
                                unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No nullification pending: simply redirect the queue and let
           translation continue at the target.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        return DISAS_NEXT;
    } else {
        /* Branch may itself be nullified: emit both exits.  */
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            /* Skip the nullified delay insn entirely.  */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx, DISAS_NEXT);

        /* Branch nullified: fall through to the next insn.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        return DISAS_NORETURN;
    }
}
1741 
1742 /* Emit a conditional branch to a direct target.  If the branch itself
1743    is nullified, we should have already used nullify_over.  */
/* DISP is the pc-relative displacement, IS_N the nullify bit, COND the
   taken-branch condition (consumed/freed here).  */
static DisasJumpType do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                                DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        return DISAS_IAQ_N_STALE;
    } else {
        return DISAS_NORETURN;
    }
}
1807 
1808 /* Emit an unconditional branch to an indirect target.  This handles
1809    nullification of the branch itself.  */
/* DEST holds the computed target address, LINK the register to receive
   the return address (0 for none), IS_N the nullify-next bit.  */
static DisasJumpType do_ibranch(DisasContext *ctx, TCGv_reg dest,
                                unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: record DEST as the (dynamic) next queue entry.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                return DISAS_IAQ_N_UPDATED;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx, DISAS_NEXT);
    } else {
        /* General case: select between DEST and the fall-through
           address under the current nullification condition.  */
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* Only write the link register when the branch is taken.  */
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }

    return DISAS_NEXT;
}
1890 
1891 /* Implement
1892  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1893  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1894  *    else
1895  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1896  * which keeps the privilege level from being increased.
1897  */
1898 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1899 {
1900     TCGv_reg dest;
1901     switch (ctx->privilege) {
1902     case 0:
1903         /* Privilege 0 is maximum and is allowed to decrease.  */
1904         return offset;
1905     case 3:
1906         /* Privilege 3 is minimum and is never allowed increase.  */
1907         dest = get_temp(ctx);
1908         tcg_gen_ori_reg(dest, offset, 3);
1909         break;
1910     default:
1911         dest = tcg_temp_new();
1912         tcg_gen_andi_reg(dest, offset, -4);
1913         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1914         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1915         tcg_temp_free(dest);
1916         break;
1917     }
1918     return dest;
1919 }
1920 
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static DisasJumpType do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        /* PSW[N] is known-set: consume the nullification and fault.  */
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the gateway-page entry offset.  */
    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        return DISAS_NORETURN;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        return DISAS_NORETURN;

    case 0xe0: /* SET_THREAD_POINTER */
        /* Store GR26 as CR27 and return to GR31, forcing privilege 3.  */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        return DISAS_IAQ_N_UPDATED;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        return DISAS_NORETURN;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        return DISAS_NORETURN;
    }
}
#endif
1980 
/* Translate an insn that has no effect beyond its (possible)
   nullification of the following insn.  */
static DisasJumpType trans_nop(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
1987 
/* Translate BREAK: raise the break exception, honoring nullification.  */
static DisasJumpType trans_break(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    nullify_over(ctx);
    return nullify_end(ctx, gen_excp_iir(ctx, EXCP_BREAK));
}
1994 
/* Translate SYNC/SYNCDMA as a full TCG memory barrier.  */
static DisasJumpType trans_sync(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2004 
/* Translate MFIA: copy the (compile-time known) front instruction
   address into GR[rt].  */
static DisasJumpType trans_mfia(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2016 
/* Translate MFSP: copy space register SR[rs] into GR[rt].  */
static DisasJumpType trans_mfsp(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rs = assemble_sr3(insn);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    /* Space registers keep the space id in the high 32 bits (see
       trans_mtsp); shift down before truncating to register width.  */
    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free(t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2036 
/* Translate MFCTL: move control register CTL to GR[rt].  SAR and the
   interval timer get special handling; CR26/CR27 bypass the privilege
   check; all other control registers require maximum privilege.  */
static DisasJumpType trans_mfctl(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv_reg tmp;
    DisasJumpType ret;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (extract32(insn, 14, 1) == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
            /* Under icount the timer read is an I/O operation; bracket
               it with gen_io_start/end and end the TB afterwards.  */
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            gen_io_end();
            ret = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
            ret = DISAS_NEXT;
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx, ret);
    case 26:
    case 27:
        /* Readable at any privilege level.  */
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    /* Generic case: load the register value out of env.  */
    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2090 
/* Translate MTSP: move GR[rr] into space register SR[rs].
   Writing SR5-SR7 requires maximum privilege.  */
static DisasJumpType trans_mtsp(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rr = extract32(insn, 16, 5);
    unsigned rs = assemble_sr3(insn);
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    /* Space registers store the space id in the high 32 bits.  */
    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        /* SR4-7 live only in env.  Clearing TB_FLAG_SR_SAME presumably
           invalidates an "all SRs equal" fast path -- see its users.  */
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        /* SR0-3 have TCG globals.  */
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx, DISAS_NEXT);
}
2117 
/* Translate MTCTL: move GR[rin] into control register CTL.  SAR is
   writable at any privilege; everything else requires maximum
   privilege (and only exists in system mode).  */
static DisasJumpType trans_mtctl(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv_reg reg = load_gpr(ctx, rin);
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        /* Only the low log2(reg-width) bits of SAR are significant.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return DISAS_NEXT;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    DisasJumpType ret = DISAS_NEXT;

    nullify_over(ctx);
    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        /* Changing the interrupt mask may unmask a pending interrupt,
           so exit to the main loop.  */
        gen_helper_write_eiem(cpu_env, reg);
        ret = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx, ret);
#endif
}
2176 
/* Translate MTSARCM: store the bitwise complement of GR[rin]
   (masked to shift-amount width) into SAR.  */
static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, rin));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2191 
/* Translate LDSID: load the space identifier for the effective
   address into GR[rt].  */
static DisasJumpType trans_ldsid(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg dest = dest_gpr(ctx, rt);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    unsigned rb = extract32(insn, 21, 5);
    unsigned sp = extract32(insn, 14, 2);
    TCGv_i64 t0 = tcg_temp_new_i64();

    /* Space registers keep the space id in the high 32 bits;
       extract it after selecting the space.  */
    tcg_gen_mov_i64(t0, space_select(ctx, sp, load_gpr(ctx, rb)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2217 
2218 #ifndef CONFIG_USER_ONLY
2219 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
2220 static target_ureg extract_sm_imm(uint32_t insn)
2221 {
2222     target_ureg val = extract32(insn, 16, 10);
2223 
2224     if (val & PSW_SM_E) {
2225         val = (val & ~PSW_SM_E) | PSW_E;
2226     }
2227     if (val & PSW_SM_W) {
2228         val = (val & ~PSW_SM_W) | PSW_W;
2229     }
2230     return val;
2231 }
2232 
/* Translate RSM: clear the given system-mask bits in the PSW,
   returning the old mask in GR[rt].  Privileged.  */
static DisasJumpType trans_rsm(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
}
2252 
/* Translate SSM: set the given system-mask bits in the PSW,
   returning the old mask in GR[rt].  Privileged.  */
static DisasJumpType trans_ssm(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
}
2272 
/* Translate MTSM: replace the PSW system mask with GR[rr].
   Privileged.  MTSM has no target register, so the old mask value
   returned by the helper is discarded.  */
static DisasJumpType trans_mtsm(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rr = extract32(insn, 16, 5);
    TCGv_reg tmp, reg;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    reg = load_gpr(ctx, rr);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    return nullify_end(ctx, DISAS_IAQ_N_STALE_EXIT);
}
2289 
/* Translate RFI / RFI,R: return from interruption, restoring state
   from the interruption registers.  Privileged.  */
static DisasJumpType trans_rfi(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned comp = extract32(insn, 5, 4);

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    /* comp == 5 is the ,R completer, which also restores registers.  */
    if (comp == 5) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* The helper rewrites all cpu state; stop the TB here.  Report a
       debug exception instead when single-stepping.  */
    if (ctx->base.singlestep_enabled) {
        gen_excp_1(EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }

    /* Exit the TB to recognize new interrupts.  */
    return nullify_end(ctx, DISAS_NORETURN);
}
2312 
2313 static DisasJumpType gen_hlt(DisasContext *ctx, int reset)
2314 {
2315     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2316     nullify_over(ctx);
2317     if (reset) {
2318         gen_helper_reset(cpu_env);
2319     } else {
2320         gen_helper_halt(cpu_env);
2321     }
2322     return nullify_end(ctx, DISAS_NORETURN);
2323 }
2324 #endif /* !CONFIG_USER_ONLY */
2325 
/* Decode table for the system-control group: each entry is a match
   value, a mask of significant bits, and the handler (see DisasInsn).  */
static const DisasInsn table_system[] = {
    { 0x00000000u, 0xfc001fe0u, trans_break },
    { 0x00001820u, 0xffe01fffu, trans_mtsp },
    { 0x00001840u, 0xfc00ffffu, trans_mtctl },
    { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
    { 0x000014a0u, 0xffffffe0u, trans_mfia },
    { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
    { 0x000008a0u, 0xfc1fbfe0u, trans_mfctl },
    { 0x00000400u, 0xffffffffu, trans_sync },  /* sync */
    { 0x00100400u, 0xffffffffu, trans_sync },  /* syncdma */
    { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
#ifndef CONFIG_USER_ONLY
    { 0x00000e60u, 0xfc00ffe0u, trans_rsm },
    { 0x00000d60u, 0xfc00ffe0u, trans_ssm },
    { 0x00001860u, 0xffe0ffffu, trans_mtsm },
    { 0x00000c00u, 0xfffffe1fu, trans_rfi },
#endif
};
2344 
/* Translate a cache-control insn implemented as a no-op, but whose
   indexed addressing mode still requires the base register update:
   GR[rb] += GR[rx].  */
static DisasJumpType trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
                                        const DisasInsn *di)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rx = extract32(insn, 16, 5);
    TCGv_reg dest = dest_gpr(ctx, rb);
    TCGv_reg src1 = load_gpr(ctx, rb);
    TCGv_reg src2 = load_gpr(ctx, rx);

    /* The only thing we need to do is the base register modification.  */
    tcg_gen_add_reg(dest, src1, src2);
    save_gpr(ctx, rb, dest);

    cond_free(&ctx->null_cond);
    return DISAS_NEXT;
}
2361 
/* Translate PROBE / PROBEI: test read or write access to an address
   at a given privilege level, setting GR[rt] to the result.  */
static DisasJumpType trans_probe(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rr = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_write = extract32(insn, 6, 1);
    unsigned is_imm = extract32(insn, 13, 1);
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, rt);
    form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);

    /* The privilege level to probe at comes either from a 2-bit
       immediate (PROBEI) or the low bits of GR[rr] (PROBE).  */
    if (is_imm) {
        level = tcg_const_i32(extract32(insn, 16, 2));
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, cpu_env, addr, level, want);

    tcg_temp_free_i32(want);
    tcg_temp_free_i32(level);

    save_gpr(ctx, rt, dest);
    return nullify_end(ctx, DISAS_NEXT);
}
2397 
2398 #ifndef CONFIG_USER_ONLY
/* Translate the TLB insert insns: IITLBA/IITLBP (insn side) and
   IDTLBA/IDTLBP (data side).  Privileged.  */
static DisasJumpType trans_ixtlbx(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned sp;
    unsigned rr = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_data = insn & 0x1000;
    unsigned is_addr = insn & 0x40;
    TCGv_tl addr;
    TCGv_reg ofs, reg;

    /* The data-side form carries a 2-bit space field; the insn-side
       form passes the sr3 field complemented, which form_gva /
       space_select interpret specially.  */
    if (is_data) {
        sp = extract32(insn, 14, 2);
    } else {
        sp = ~assemble_sr3(insn);
    }

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
    reg = load_gpr(ctx, rr);
    /* is_addr selects the address-insert form over protection-insert.  */
    if (is_addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }

    /* Exit TB for ITLB change if mmu is enabled.  This *should* not be
       the case, since the OS TLB fill handler runs with mmu disabled.  */
    return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
                       ? DISAS_IAQ_N_STALE : DISAS_NEXT);
}
2432 
/* Translate the TLB purge insns: PITLB/PITLBE and PDTLB/PDTLBE.
   The "entire" forms flush everything; the others purge by address.
   Privileged.  */
static DisasJumpType trans_pxtlbx(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned m = extract32(insn, 5, 1);
    unsigned sp;
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_data = insn & 0x1000;
    unsigned is_local = insn & 0x40;
    TCGv_tl addr;
    TCGv_reg ofs;

    /* Space selection mirrors trans_ixtlbx: 2-bit field for the data
       side, complemented sr3 field for the insn side.  */
    if (is_data) {
        sp = extract32(insn, 14, 2);
    } else {
        sp = ~assemble_sr3(insn);
    }

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
    if (m) {
        /* ,m completer: write back the base modification.  */
        save_gpr(ctx, rb, ofs);
    }
    if (is_local) {
        gen_helper_ptlbe(cpu_env);
    } else {
        gen_helper_ptlb(cpu_env, addr);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
                       ? DISAS_IAQ_N_STALE : DISAS_NEXT);
}
2468 
/* Translate LPA: load the physical address translating the given
   virtual address into GR[rt].  Privileged.  */
static DisasJumpType trans_lpa(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv_tl vaddr;
    TCGv_reg ofs, paddr;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);

    paddr = tcg_temp_new();
    gen_helper_lpa(paddr, cpu_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (m) {
        save_gpr(ctx, rb, ofs);
    }
    save_gpr(ctx, rt, paddr);
    tcg_temp_free(paddr);

    return nullify_end(ctx, DISAS_NEXT);
}
2497 
2498 static DisasJumpType trans_lci(DisasContext *ctx, uint32_t insn,
2499                                const DisasInsn *di)
2500 {
2501     unsigned rt = extract32(insn, 0, 5);
2502     TCGv_reg ci;
2503 
2504     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2505 
2506     /* The Coherence Index is an implementation-defined function of the
2507        physical address.  Two addresses with the same CI have a coherent
2508        view of the cache.  Our implementation is to return 0 for all,
2509        since the entire address space is coherent.  */
2510     ci = tcg_const_reg(0);
2511     save_gpr(ctx, rt, ci);
2512     tcg_temp_free(ci);
2513 
2514     return DISAS_NEXT;
2515 }
2516 #endif /* !CONFIG_USER_ONLY */
2517 
/* Decode table for the memory-management group (cache flush/purge,
   probe, TLB ops): match value, mask, handler (see DisasInsn).  */
static const DisasInsn table_mem_mgmt[] = {
    { 0x04003280u, 0xfc003fffu, trans_nop },          /* fdc, disp */
    { 0x04001280u, 0xfc003fffu, trans_nop },          /* fdc, index */
    { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
    { 0x040012c0u, 0xfc003fffu, trans_nop },          /* fdce */
    { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
    { 0x04000280u, 0xfc001fffu, trans_nop },          /* fic 0a */
    { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
    { 0x040013c0u, 0xfc003fffu, trans_nop },          /* fic 4f */
    { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
    { 0x040002c0u, 0xfc001fffu, trans_nop },          /* fice */
    { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
    { 0x04002700u, 0xfc003fffu, trans_nop },          /* pdc */
    { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
    { 0x04001180u, 0xfc003fa0u, trans_probe },        /* probe */
    { 0x04003180u, 0xfc003fa0u, trans_probe },        /* probei */
#ifndef CONFIG_USER_ONLY
    { 0x04000000u, 0xfc001fffu, trans_ixtlbx },       /* iitlbp */
    { 0x04000040u, 0xfc001fffu, trans_ixtlbx },       /* iitlba */
    { 0x04001000u, 0xfc001fffu, trans_ixtlbx },       /* idtlbp */
    { 0x04001040u, 0xfc001fffu, trans_ixtlbx },       /* idtlba */
    { 0x04000200u, 0xfc001fdfu, trans_pxtlbx },       /* pitlb */
    { 0x04000240u, 0xfc001fdfu, trans_pxtlbx },       /* pitlbe */
    { 0x04001200u, 0xfc001fdfu, trans_pxtlbx },       /* pdtlb */
    { 0x04001240u, 0xfc001fdfu, trans_pxtlbx },       /* pdtlbe */
    { 0x04001340u, 0xfc003fc0u, trans_lpa },
    { 0x04001300u, 0xfc003fe0u, trans_lci },
#endif
};
2547 
/* Translate the ADD family (ADD, SHLADD and their ,L ,TSV ,C
   completer variants), dispatching on the extension field and
   deferring the actual codegen to do_add.  */
static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 8, 4);
    unsigned shift = extract32(insn, 6, 2);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    bool is_c = false;
    bool is_l = false;
    bool is_tc = false;
    bool is_tsv = false;
    DisasJumpType ret;

    switch (ext) {
    case 0x6: /* ADD, SHLADD */
        break;
    case 0xa: /* ADD,L, SHLADD,L */
        is_l = true;
        break;
    case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
        is_tsv = true;
        break;
    case 0x7: /* ADD,C */
        is_c = true;
        break;
    case 0xf: /* ADD,C,TSV */
        is_c = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    /* A non-zero condition field may nullify the next insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
    return nullify_end(ctx, ret);
}
2591 
/* Translate the SUB family (SUB and its ,TSV ,B ,TC completer
   variants), dispatching on the extension field to do_sub.  */
static DisasJumpType trans_sub(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned ext = extract32(insn, 6, 6);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    bool is_b = false;
    bool is_tc = false;
    bool is_tsv = false;
    DisasJumpType ret;

    switch (ext) {
    case 0x10: /* SUB */
        break;
    case 0x30: /* SUB,TSV */
        is_tsv = true;
        break;
    case 0x14: /* SUB,B */
        is_b = true;
        break;
    case 0x34: /* SUB,B,TSV */
        is_b = is_tsv = true;
        break;
    case 0x13: /* SUB,TC */
        is_tc = true;
        break;
    case 0x33: /* SUB,TSV,TC */
        is_tc = is_tsv = true;
        break;
    default:
        return gen_illegal(ctx);
    }

    /* A non-zero condition field may nullify the next insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
    return nullify_end(ctx, ret);
}
2636 
/* Translate a logical op (AND/OR/XOR/...); the actual TCG generator
   comes from the decode-table entry (di->f.ttt).  */
static DisasJumpType trans_log(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    DisasJumpType ret;

    /* A non-zero condition field may nullify the next insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
    return nullify_end(ctx, ret);
}
2655 
2656 /* OR r,0,t -> COPY (according to gas) */
2657 static DisasJumpType trans_copy(DisasContext *ctx, uint32_t insn,
2658                                 const DisasInsn *di)
2659 {
2660     unsigned r1 = extract32(insn, 16, 5);
2661     unsigned rt = extract32(insn,  0, 5);
2662 
2663     if (r1 == 0) {
2664         TCGv_reg dest = dest_gpr(ctx, rt);
2665         tcg_gen_movi_reg(dest, 0);
2666         save_gpr(ctx, rt, dest);
2667     } else {
2668         save_gpr(ctx, rt, cpu_gr[r1]);
2669     }
2670     cond_free(&ctx->null_cond);
2671     return DISAS_NEXT;
2672 }
2673 
/* Translate CMPCLR: compare r1 and r2, clear GR[rt], and optionally
   nullify based on the condition field.  Codegen in do_cmpclr.  */
static DisasJumpType trans_cmpclr(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    DisasJumpType ret;

    /* A non-zero condition field may nullify the next insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
    return nullify_end(ctx, ret);
}
2692 
/* Translate UXOR: unit XOR, i.e. XOR evaluated with the unit
   (sub-word) conditions via do_unit.  */
static DisasJumpType trans_uxor(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2;
    DisasJumpType ret;

    /* A non-zero condition field may nullify the next insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
    return nullify_end(ctx, ret);
}
2711 
/* Translate UADDCM / UADDCM,TC: unit add with complement,
   implemented as r1 + ~r2 through do_unit.  */
static DisasJumpType trans_uaddcm(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_tc = extract32(insn, 6, 1);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tcg_r1, tcg_r2, tmp;
    DisasJumpType ret;

    /* A non-zero condition field may nullify the next insn.  */
    if (cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, r1);
    tcg_r2 = load_gpr(ctx, r2);
    tmp = get_temp(ctx);
    tcg_gen_not_reg(tmp, tcg_r2);
    ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx, ret);
}
2733 
/* Translate DCOR / IDCOR: decimal correct.  Builds a constant with 6
   in each 4-bit digit position selected by the saved carry bits
   (inverted for plain DCOR), then adds (IDCOR) or subtracts (DCOR)
   it via do_unit.  */
static DisasJumpType trans_dcor(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned is_i = extract32(insn, 6, 1);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg tmp;
    DisasJumpType ret;

    nullify_over(ctx);

    /* Align the per-digit carry bits to bit 0 of each nibble.  */
    tmp = get_temp(ctx);
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
                  is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);

    return nullify_end(ctx, ret);
}
2758 
/* DS: divide step.  One step of the architected division primitive:
   form (r1 << 1) | PSW[CB]{8}, then add or subtract r2 depending on
   PSW[V], updating PSW[CB] and PSW[V] for the next step.  */
static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
                              const DisasInsn *di)
{
    unsigned r2 = extract32(insn, 21, 5);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned cf = extract32(insn, 12, 4);
    unsigned rt = extract32(insn,  0, 5);
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, r1);
    in2 = load_gpr(ctx, r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_const_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);
    tcg_temp_free(zero);

    /* Write back the result register.  */
    save_gpr(ctx, rt, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (cf) {
        TCGv_reg sv = NULL;
        /* Condition 6 (and its negation) additionally needs the
           signed-overflow value.  */
        if (cf >> 1 == 6) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx, DISAS_NEXT);
}
2824 
2825 #ifndef CONFIG_USER_ONLY
2826 /* These are QEMU extensions and are nops in the real architecture:
2827  *
2828  * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2829  * or %r31,%r31,%r31 -- death loop; offline cpu
2830  *                      currently implemented as idle.
2831  */
static DisasJumpType trans_pause(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    TCGv_i32 tmp;

    /* No need to check for supervisor, as userland can only pause
       until the next timer interrupt.  */
    nullify_over(ctx);

    /* Advance the instruction queue, so that we resume after the
       halt rather than re-executing the pause insn.  */
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
    nullify_set(ctx, 0);

    /* Tell the qemu main loop to halt until this cpu has work.  */
    /* cpu_env points at the CPUHPPAState embedded in HPPACPU; the
       negative offset walks back up to the enclosing CPUState.  */
    tmp = tcg_const_i32(1);
    tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
                                 offsetof(CPUState, halted));
    tcg_temp_free_i32(tmp);
    gen_excp_1(EXCP_HALTED);

    return nullify_end(ctx, DISAS_NORETURN);
}
2855 #endif
2856 
/* Decode table for the three-register arithmetic/logical group.
   Each entry is { match-bits, mask, handler }; more specific patterns
   (nop/copy/pause) precede the generic ones so they match first.  */
static const DisasInsn table_arith_log[] = {
    { 0x08000240u, 0xfc00ffffu, trans_nop },  /* or x,y,0 */
    { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
#ifndef CONFIG_USER_ONLY
    { 0x094a024au, 0xffffffffu, trans_pause }, /* or r10,r10,r10 */
    { 0x0bff025fu, 0xffffffffu, trans_pause }, /* or r31,r31,r31 */
#endif
    { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
    { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
    { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
    { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
    { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
    { 0x08000380u, 0xfc000fe0u, trans_uxor },
    { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
    { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
    { 0x08000440u, 0xfc000fe0u, trans_ds },
    { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
    { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
    { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
    { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
};
2878 
2879 static DisasJumpType trans_addi(DisasContext *ctx, uint32_t insn)
2880 {
2881     target_sreg im = low_sextract(insn, 0, 11);
2882     unsigned e1 = extract32(insn, 11, 1);
2883     unsigned cf = extract32(insn, 12, 4);
2884     unsigned rt = extract32(insn, 16, 5);
2885     unsigned r2 = extract32(insn, 21, 5);
2886     unsigned o1 = extract32(insn, 26, 1);
2887     TCGv_reg tcg_im, tcg_r2;
2888     DisasJumpType ret;
2889 
2890     if (cf) {
2891         nullify_over(ctx);
2892     }
2893 
2894     tcg_im = load_const(ctx, im);
2895     tcg_r2 = load_gpr(ctx, r2);
2896     ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2897 
2898     return nullify_end(ctx, ret);
2899 }
2900 
2901 static DisasJumpType trans_subi(DisasContext *ctx, uint32_t insn)
2902 {
2903     target_sreg im = low_sextract(insn, 0, 11);
2904     unsigned e1 = extract32(insn, 11, 1);
2905     unsigned cf = extract32(insn, 12, 4);
2906     unsigned rt = extract32(insn, 16, 5);
2907     unsigned r2 = extract32(insn, 21, 5);
2908     TCGv_reg tcg_im, tcg_r2;
2909     DisasJumpType ret;
2910 
2911     if (cf) {
2912         nullify_over(ctx);
2913     }
2914 
2915     tcg_im = load_const(ctx, im);
2916     tcg_r2 = load_gpr(ctx, r2);
2917     ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2918 
2919     return nullify_end(ctx, ret);
2920 }
2921 
2922 static DisasJumpType trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2923 {
2924     target_sreg im = low_sextract(insn, 0, 11);
2925     unsigned cf = extract32(insn, 12, 4);
2926     unsigned rt = extract32(insn, 16, 5);
2927     unsigned r2 = extract32(insn, 21, 5);
2928     TCGv_reg tcg_im, tcg_r2;
2929     DisasJumpType ret;
2930 
2931     if (cf) {
2932         nullify_over(ctx);
2933     }
2934 
2935     tcg_im = load_const(ctx, im);
2936     tcg_r2 = load_gpr(ctx, r2);
2937     ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2938 
2939     return nullify_end(ctx, ret);
2940 }
2941 
2942 static DisasJumpType trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2943                                     const DisasInsn *di)
2944 {
2945     unsigned rt = extract32(insn, 0, 5);
2946     unsigned m = extract32(insn, 5, 1);
2947     unsigned sz = extract32(insn, 6, 2);
2948     unsigned a = extract32(insn, 13, 1);
2949     unsigned sp = extract32(insn, 14, 2);
2950     int disp = low_sextract(insn, 16, 5);
2951     unsigned rb = extract32(insn, 21, 5);
2952     int modify = (m ? (a ? -1 : 1) : 0);
2953     TCGMemOp mop = MO_TE | sz;
2954 
2955     return do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
2956 }
2957 
2958 static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2959                                     const DisasInsn *di)
2960 {
2961     unsigned rt = extract32(insn, 0, 5);
2962     unsigned m = extract32(insn, 5, 1);
2963     unsigned sz = extract32(insn, 6, 2);
2964     unsigned u = extract32(insn, 13, 1);
2965     unsigned sp = extract32(insn, 14, 2);
2966     unsigned rx = extract32(insn, 16, 5);
2967     unsigned rb = extract32(insn, 21, 5);
2968     TCGMemOp mop = MO_TE | sz;
2969 
2970     return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
2971 }
2972 
2973 static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2974                                     const DisasInsn *di)
2975 {
2976     int disp = low_sextract(insn, 0, 5);
2977     unsigned m = extract32(insn, 5, 1);
2978     unsigned sz = extract32(insn, 6, 2);
2979     unsigned a = extract32(insn, 13, 1);
2980     unsigned sp = extract32(insn, 14, 2);
2981     unsigned rr = extract32(insn, 16, 5);
2982     unsigned rb = extract32(insn, 21, 5);
2983     int modify = (m ? (a ? -1 : 1) : 0);
2984     TCGMemOp mop = MO_TE | sz;
2985 
2986     return do_store(ctx, rr, rb, disp, sp, modify, mop);
2987 }
2988 
/* LDCW: load and clear word.  Implemented as an atomic exchange of the
   memory word with zero; the architecture requires 16-byte alignment
   of the operand (MO_ALIGN_16).  */
static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned i = extract32(insn, 12, 1);
    unsigned au = extract32(insn, 13, 1);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rx = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;
    int modify, disp = 0, scale = 0;

    nullify_over(ctx);

    if (i) {
        /* Short-displacement form: RX carries a 5-bit displacement,
           AU picks the sign of the base modification.  */
        modify = (m ? (au ? -1 : 1) : 0);
        disp = low_sextract(rx, 0, 5);
        rx = 0;
    } else {
        /* Register-index form: AU requests index scaling by the
           access size.  */
        modify = m;
        if (au) {
            scale = mop & MO_SIZE;
        }
    }
    if (modify) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, rt);
    }

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    zero = tcg_const_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx, DISAS_NEXT);
}
3035 
/* STBY: store bytes.  A selects the "ending" (stby,e) vs "beginning"
   (stby,b) helper; parallel contexts use the *_parallel helpers so the
   partial store is performed atomically.  */
static DisasJumpType trans_stby(DisasContext *ctx, uint32_t insn,
                                const DisasInsn *di)
{
    target_sreg disp = low_sextract(insn, 0, 5);
    unsigned m = extract32(insn, 5, 1);
    unsigned a = extract32(insn, 13, 1);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    TCGv_reg ofs, val;
    TCGv_tl addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
             ctx->mmu_idx == MMU_PHYS_IDX);
    val = load_gpr(ctx, rt);
    if (a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }

    if (m) {
        /* Base modification writes back the word-aligned address.  */
        tcg_gen_andi_reg(ofs, ofs, ~3);
        save_gpr(ctx, rb, ofs);
    }

    return nullify_end(ctx, DISAS_NEXT);
}
3074 
3075 #ifndef CONFIG_USER_ONLY
3076 static DisasJumpType trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
3077                                       const DisasInsn *di)
3078 {
3079     int hold_mmu_idx = ctx->mmu_idx;
3080     DisasJumpType ret;
3081 
3082     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3083 
3084     /* ??? needs fixing for hppa64 -- ldda does not follow the same
3085        format wrt the sub-opcode in bits 6:9.  */
3086     ctx->mmu_idx = MMU_PHYS_IDX;
3087     ret = trans_ld_idx_i(ctx, insn, di);
3088     ctx->mmu_idx = hold_mmu_idx;
3089     return ret;
3090 }
3091 
3092 static DisasJumpType trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
3093                                       const DisasInsn *di)
3094 {
3095     int hold_mmu_idx = ctx->mmu_idx;
3096     DisasJumpType ret;
3097 
3098     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3099 
3100     /* ??? needs fixing for hppa64 -- ldda does not follow the same
3101        format wrt the sub-opcode in bits 6:9.  */
3102     ctx->mmu_idx = MMU_PHYS_IDX;
3103     ret = trans_ld_idx_x(ctx, insn, di);
3104     ctx->mmu_idx = hold_mmu_idx;
3105     return ret;
3106 }
3107 
3108 static DisasJumpType trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
3109                                       const DisasInsn *di)
3110 {
3111     int hold_mmu_idx = ctx->mmu_idx;
3112     DisasJumpType ret;
3113 
3114     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3115 
3116     /* ??? needs fixing for hppa64 -- ldda does not follow the same
3117        format wrt the sub-opcode in bits 6:9.  */
3118     ctx->mmu_idx = MMU_PHYS_IDX;
3119     ret = trans_st_idx_i(ctx, insn, di);
3120     ctx->mmu_idx = hold_mmu_idx;
3121     return ret;
3122 }
3123 #endif
3124 
/* Decode table for the indexed/short-displacement memory group
   (major opcode 0x03).  Entries are { match-bits, mask, handler }.  */
static const DisasInsn table_index_mem[] = {
    { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
    { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
    { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
    { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
    { 0x0c001300u, 0xfc0013c0, trans_stby },
#ifndef CONFIG_USER_ONLY
    { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
    { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
    { 0x0c001380u, 0xfc00d3c0, trans_stwa_idx_i }, /* STWA, im */
#endif
};
3137 
3138 static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
3139 {
3140     unsigned rt = extract32(insn, 21, 5);
3141     target_sreg i = assemble_21(insn);
3142     TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3143 
3144     tcg_gen_movi_reg(tcg_rt, i);
3145     save_gpr(ctx, rt, tcg_rt);
3146     cond_free(&ctx->null_cond);
3147 
3148     return DISAS_NEXT;
3149 }
3150 
3151 static DisasJumpType trans_addil(DisasContext *ctx, uint32_t insn)
3152 {
3153     unsigned rt = extract32(insn, 21, 5);
3154     target_sreg i = assemble_21(insn);
3155     TCGv_reg tcg_rt = load_gpr(ctx, rt);
3156     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3157 
3158     tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
3159     save_gpr(ctx, 1, tcg_r1);
3160     cond_free(&ctx->null_cond);
3161 
3162     return DISAS_NEXT;
3163 }
3164 
3165 static DisasJumpType trans_ldo(DisasContext *ctx, uint32_t insn)
3166 {
3167     unsigned rb = extract32(insn, 21, 5);
3168     unsigned rt = extract32(insn, 16, 5);
3169     target_sreg i = assemble_16(insn);
3170     TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3171 
3172     /* Special case rb == 0, for the LDI pseudo-op.
3173        The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
3174     if (rb == 0) {
3175         tcg_gen_movi_reg(tcg_rt, i);
3176     } else {
3177         tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
3178     }
3179     save_gpr(ctx, rt, tcg_rt);
3180     cond_free(&ctx->null_cond);
3181 
3182     return DISAS_NEXT;
3183 }
3184 
3185 static DisasJumpType trans_load(DisasContext *ctx, uint32_t insn,
3186                                 bool is_mod, TCGMemOp mop)
3187 {
3188     unsigned rb = extract32(insn, 21, 5);
3189     unsigned rt = extract32(insn, 16, 5);
3190     unsigned sp = extract32(insn, 14, 2);
3191     target_sreg i = assemble_16(insn);
3192 
3193     return do_load(ctx, rt, rb, 0, 0, i, sp,
3194                    is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3195 }
3196 
3197 static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
3198 {
3199     unsigned rb = extract32(insn, 21, 5);
3200     unsigned rt = extract32(insn, 16, 5);
3201     unsigned sp = extract32(insn, 14, 2);
3202     target_sreg i = assemble_16a(insn);
3203     unsigned ext2 = extract32(insn, 1, 2);
3204 
3205     switch (ext2) {
3206     case 0:
3207     case 1:
3208         /* FLDW without modification.  */
3209         return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3210     case 2:
3211         /* LDW with modification.  Note that the sign of I selects
3212            post-dec vs pre-inc.  */
3213         return do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3214     default:
3215         return gen_illegal(ctx);
3216     }
3217 }
3218 
3219 static DisasJumpType trans_fload_mod(DisasContext *ctx, uint32_t insn)
3220 {
3221     target_sreg i = assemble_16a(insn);
3222     unsigned t1 = extract32(insn, 1, 1);
3223     unsigned a = extract32(insn, 2, 1);
3224     unsigned sp = extract32(insn, 14, 2);
3225     unsigned t0 = extract32(insn, 16, 5);
3226     unsigned rb = extract32(insn, 21, 5);
3227 
3228     /* FLDW with modification.  */
3229     return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3230 }
3231 
3232 static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
3233                                  bool is_mod, TCGMemOp mop)
3234 {
3235     unsigned rb = extract32(insn, 21, 5);
3236     unsigned rt = extract32(insn, 16, 5);
3237     unsigned sp = extract32(insn, 14, 2);
3238     target_sreg i = assemble_16(insn);
3239 
3240     return do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3241 }
3242 
3243 static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
3244 {
3245     unsigned rb = extract32(insn, 21, 5);
3246     unsigned rt = extract32(insn, 16, 5);
3247     unsigned sp = extract32(insn, 14, 2);
3248     target_sreg i = assemble_16a(insn);
3249     unsigned ext2 = extract32(insn, 1, 2);
3250 
3251     switch (ext2) {
3252     case 0:
3253     case 1:
3254         /* FSTW without modification.  */
3255         return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3256     case 2:
3257         /* STW with modification.  */
3258         return do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3259     default:
3260         return gen_illegal(ctx);
3261     }
3262 }
3263 
3264 static DisasJumpType trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3265 {
3266     target_sreg i = assemble_16a(insn);
3267     unsigned t1 = extract32(insn, 1, 1);
3268     unsigned a = extract32(insn, 2, 1);
3269     unsigned sp = extract32(insn, 14, 2);
3270     unsigned t0 = extract32(insn, 16, 5);
3271     unsigned rb = extract32(insn, 21, 5);
3272 
3273     /* FSTW with modification.  */
3274     return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3275 }
3276 
3277 static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
3278 {
3279     unsigned t0 = extract32(insn, 0, 5);
3280     unsigned m = extract32(insn, 5, 1);
3281     unsigned t1 = extract32(insn, 6, 1);
3282     unsigned ext3 = extract32(insn, 7, 3);
3283     /* unsigned cc = extract32(insn, 10, 2); */
3284     unsigned i = extract32(insn, 12, 1);
3285     unsigned ua = extract32(insn, 13, 1);
3286     unsigned sp = extract32(insn, 14, 2);
3287     unsigned rx = extract32(insn, 16, 5);
3288     unsigned rb = extract32(insn, 21, 5);
3289     unsigned rt = t1 * 32 + t0;
3290     int modify = (m ? (ua ? -1 : 1) : 0);
3291     int disp, scale;
3292 
3293     if (i == 0) {
3294         scale = (ua ? 2 : 0);
3295         disp = 0;
3296         modify = m;
3297     } else {
3298         disp = low_sextract(rx, 0, 5);
3299         scale = 0;
3300         rx = 0;
3301         modify = (m ? (ua ? -1 : 1) : 0);
3302     }
3303 
3304     switch (ext3) {
3305     case 0: /* FLDW */
3306         return do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3307     case 4: /* FSTW */
3308         return do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3309     }
3310     return gen_illegal(ctx);
3311 }
3312 
3313 static DisasJumpType trans_copr_dw(DisasContext *ctx, uint32_t insn)
3314 {
3315     unsigned rt = extract32(insn, 0, 5);
3316     unsigned m = extract32(insn, 5, 1);
3317     unsigned ext4 = extract32(insn, 6, 4);
3318     /* unsigned cc = extract32(insn, 10, 2); */
3319     unsigned i = extract32(insn, 12, 1);
3320     unsigned ua = extract32(insn, 13, 1);
3321     unsigned sp = extract32(insn, 14, 2);
3322     unsigned rx = extract32(insn, 16, 5);
3323     unsigned rb = extract32(insn, 21, 5);
3324     int modify = (m ? (ua ? -1 : 1) : 0);
3325     int disp, scale;
3326 
3327     if (i == 0) {
3328         scale = (ua ? 3 : 0);
3329         disp = 0;
3330         modify = m;
3331     } else {
3332         disp = low_sextract(rx, 0, 5);
3333         scale = 0;
3334         rx = 0;
3335         modify = (m ? (ua ? -1 : 1) : 0);
3336     }
3337 
3338     switch (ext4) {
3339     case 0: /* FLDD */
3340         return do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3341     case 8: /* FSTD */
3342         return do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3343     default:
3344         return gen_illegal(ctx);
3345     }
3346 }
3347 
/* CMPB / CMPIB: compare and branch.  Computes in1 - in2 and branches
   on the resulting condition; condition 6 additionally needs the
   signed-overflow value.  */
static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
                                bool is_true, bool is_imm, bool is_dw)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv_reg dest, in1, in2, sv;
    DisasCond cond;

    nullify_over(ctx);

    /* The immediate form compares a 5-bit signed constant.  */
    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    sv = NULL;
    if (c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(cf, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3379 
/* ADDB / ADDIB: add and branch.  Adds in1 to r (result stored back to
   r) and branches on the condition; conditions 4/5 need the carry-out
   and condition 6 needs the signed-overflow value.  */
static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
                                bool is_true, bool is_imm)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned r = extract32(insn, 21, 5);
    unsigned cf = c * 2 + !is_true;
    TCGv_reg dest, in1, in2, sv, cb_msb;
    DisasCond cond;

    nullify_over(ctx);

    /* The immediate form adds a 5-bit signed constant.  */
    if (is_imm) {
        in1 = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        in1 = load_gpr(ctx, extract32(insn, 16, 5));
    }
    in2 = load_gpr(ctx, r);
    dest = dest_gpr(ctx, r);
    sv = NULL;
    cb_msb = NULL;

    switch (c) {
    default:
        tcg_gen_add_reg(dest, in1, in2);
        break;
    case 4: case 5:
        /* Carry conditions: compute the carry-out alongside the sum.  */
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        break;
    case 6:
        /* Overflow condition: compute the signed-overflow value.  */
        tcg_gen_add_reg(dest, in1, in2);
        sv = do_add_sv(ctx, dest, in1, in2);
        break;
    }

    cond = do_cond(cf, dest, cb_msb, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3421 
/* BB: branch on bit.  Shifts the selected bit into the sign position
   (big-endian bit numbering, so a left shift by the position) and
   tests the sign.  I selects a fixed position P vs the SAR.  */
static DisasJumpType trans_bb(DisasContext *ctx, uint32_t insn)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 15, 1);
    unsigned r = extract32(insn, 16, 5);
    unsigned p = extract32(insn, 21, 5);
    unsigned i = extract32(insn, 26, 1);
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, r);
    if (i) {
        tcg_gen_shli_reg(tmp, tcg_r, p);
    } else {
        tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
    }

    /* C selects branch on bit clear (GE) vs bit set (LT).  */
    cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, disp, n, &cond);
}
3447 
/* MOVB / MOVIB: move and branch.  Copies the source (register or 5-bit
   signed immediate) into r and branches on a shift/extract/deposit
   style condition over the moved value.  */
static DisasJumpType trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
{
    target_sreg disp = assemble_12(insn) * 4;
    unsigned n = extract32(insn, 1, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned t = extract32(insn, 16, 5);
    unsigned r = extract32(insn, 21, 5);
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, r);
    if (is_imm) {
        /* The T field doubles as a 5-bit signed immediate.  */
        tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
    } else if (t == 0) {
        /* GR0 always reads as zero.  */
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[t]);
    }

    cond = do_sed_cond(c, dest);
    return do_cbranch(ctx, disp, n, &cond);
}
3472 
/* SHRPW, variable form: shift the 64-bit double r1:r2 right by the SAR
   and keep the low word.  Special-cases r1 == 0 (plain word shift) and
   r1 == r2 (word rotate) to avoid the 64-bit computation.  */
static DisasJumpType trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    TCGv_reg dest;

    /* A non-zero condition may nullify the following insn.  */
    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    if (r1 == 0) {
        /* High word is zero: a plain logical right shift suffices.  */
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (r1 == r2) {
        /* Both halves equal: this is a 32-bit rotate right.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        /* General case: concatenate r1:r2 and shift the 64-bit value.  */
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3517 
/* SHRPW, fixed form: shift the double r1:r2 right by a constant
   (31 - cpos) and keep the low word.  Special-cases r1 == r2 (rotate)
   and r1 == 0 (extract).  */
static DisasJumpType trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned c = extract32(insn, 13, 3);
    unsigned r1 = extract32(insn, 16, 5);
    unsigned r2 = extract32(insn, 21, 5);
    unsigned sa = 31 - cpos;
    TCGv_reg dest, t2;

    /* A non-zero condition may nullify the following insn.  */
    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    t2 = load_gpr(ctx, r2);
    if (r1 == r2) {
        /* Both halves equal: a 32-bit rotate right by SA.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else if (r1 == 0) {
        /* High word is zero: extract the surviving bits of r2.  */
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else {
        /* General case: r2's high bits below, r1's low bits above.  */
        TCGv_reg t0 = tcg_temp_new();
        tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
        tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
        tcg_temp_free(t0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3558 
/* EXTRW, variable form: extract a (32 - clen)-bit field whose position
   is given by the SAR; IS_SE selects sign vs zero extension.  */
static DisasJumpType trans_extrw_sar(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned is_se = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rr = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    TCGv_reg dest, src, tmp;

    /* A non-zero condition may nullify the following insn.  */
    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    src = load_gpr(ctx, rr);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (is_se) {
        /* Shift the field to bit 0, then sign-extend it.  */
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        /* Shift the field to bit 0, then zero-extend it.  */
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3597 
/* EXTRW, fixed form: extract a (32 - clen)-bit field at big-endian
   position POS (little-endian 31 - pos); IS_SE selects sign vs zero
   extension.  */
static DisasJumpType trans_extrw_imm(DisasContext *ctx, uint32_t insn,
                                     const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned pos = extract32(insn, 5, 5);
    unsigned is_se = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    unsigned rt = extract32(insn, 16, 5);
    unsigned rr = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    unsigned cpos = 31 - pos;
    TCGv_reg dest, src;

    /* A non-zero condition may nullify the following insn.  */
    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    src = load_gpr(ctx, rr);
    if (is_se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3631 
/* Decode table for the shift-pair/extract group.
   Entries are { match-bits, mask, handler }.  */
static const DisasInsn table_sh_ex[] = {
    { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
    { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
    { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
    { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
};
3638 
/* DEPWI, fixed form: deposit a 5-bit signed immediate into a field of
   rt.  With both operands constant the deposit folds to and/or masks
   (or a plain move when NZ is clear).  */
static DisasJumpType trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);
    unsigned cpos = extract32(insn, 5, 5);
    unsigned nz = extract32(insn, 10, 1);
    unsigned c = extract32(insn, 13, 3);
    target_sreg val = low_sextract(insn, 16, 5);
    unsigned rt = extract32(insn, 21, 5);
    unsigned len = 32 - clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    /* A non-zero condition may nullify the following insn.  */
    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    /* mask0: the deposited value on a zero background;
       mask1: the deposited value on an all-ones background.  */
    mask0 = deposit64(0, cpos, len, val);
    mask1 = deposit64(-1, cpos, len, val);

    if (nz) {
        /* NZ: merge into the previous contents of rt --
           clear the field with mask1, then set bits with mask0.  */
        TCGv_reg src = load_gpr(ctx, rt);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        /* Zero background: the result is the constant itself.  */
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3682 
/* Deposit a field from GR[rr] into GR[rt] at a fixed position, using the
   host deposit operation.  With nz clear, bits outside the field become
   zero (deposit into zero); with nz set, they are merged from old rt.  */
static DisasJumpType trans_depw_imm(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);      /* 32 - field length */
    unsigned cpos = extract32(insn, 5, 5);      /* field position */
    unsigned nz = extract32(insn, 10, 1);       /* merge vs zero background */
    unsigned c = extract32(insn, 13, 3);        /* nullify condition */
    unsigned rr = extract32(insn, 16, 5);       /* source register */
    unsigned rt = extract32(insn, 21, 5);       /* target register */
    unsigned rs = nz ? rt : 0;                  /* background register (0 = zeros) */
    unsigned len = 32 - clen;
    TCGv_reg dest, val;

    if (c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (cpos + len > 32) {
        len = 32 - cpos;
    }

    dest = dest_gpr(ctx, rt);
    val = load_gpr(ctx, rr);
    if (rs == 0) {
        tcg_gen_deposit_z_reg(dest, val, cpos, len);
    } else {
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3719 
/* Deposit at a variable position taken from the SAR register.  Since the
   position is not known at translate time, build the deposit by hand:
   mask the source field, shift field and mask into place, then merge.  */
static DisasJumpType trans_depw_sar(DisasContext *ctx, uint32_t insn,
                                    const DisasInsn *di)
{
    unsigned clen = extract32(insn, 0, 5);      /* 32 - field length */
    unsigned nz = extract32(insn, 10, 1);       /* merge vs zero background */
    unsigned i = extract32(insn, 12, 1);        /* 1: immediate source */
    unsigned c = extract32(insn, 13, 3);        /* nullify condition */
    unsigned rt = extract32(insn, 21, 5);       /* target register */
    unsigned rs = nz ? rt : 0;                  /* background register (0 = zeros) */
    unsigned len = 32 - clen;
    TCGv_reg val, mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);             /* top bit of the field */

    if (c) {
        nullify_over(ctx);
    }

    /* Source is either a 5-bit signed immediate or a GPR.  */
    if (i) {
        val = load_const(ctx, low_sextract(insn, 16, 5));
    } else {
        val = load_gpr(ctx, extract32(insn, 16, 5));
    }
    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    /* mask covers the low LEN bits; tmp is the field value.  */
    mask = tcg_const_reg(msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        /* Merge: clear the field in the background, then OR it in.  */
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        /* Zero background: the shifted field is the whole result.  */
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx, DISAS_NEXT);
}
3771 
/* Major opcode 0x35: deposit instructions.
   Entries are { match value, mask, translator }.  */
static const DisasInsn table_depw[] = {
    { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
    { 0xd4000800u, 0xfc001800u, trans_depw_imm },
    { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
};
3777 
/* Branch external (BE / BE,L): branch to disp(sr,rb), i.e. into another
   address space.  With is_l set, the return offset is left in GR31 and
   the return space in SR0.  */
static DisasJumpType trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
{
    unsigned n = extract32(insn, 1, 1);         /* nullify delay slot */
    unsigned b = extract32(insn, 21, 5);        /* base register */
    target_sreg disp = assemble_17(insn);
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (b == 0) {
        return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
    }
#else
    int sp = assemble_sr3(insn);
    nullify_over(ctx);
#endif

    /* Compute the target offset, adjusted for privilege.  */
    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, sp);
    if (is_l) {
        /* Record the return link: offset in GR31, space in SR0.  */
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (n && use_nullify_skip(ctx)) {
        /* Delay slot nullified: jump straight to the target queue.  */
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        /* Advance the queue: old back becomes front, target becomes back.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, n);
    }
    tcg_temp_free_i64(new_spc);
    tcg_gen_lookup_and_goto_ptr();
    return nullify_end(ctx, DISAS_NORETURN);
#endif
}
3835 
3836 static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
3837                               const DisasInsn *di)
3838 {
3839     unsigned n = extract32(insn, 1, 1);
3840     unsigned link = extract32(insn, 21, 5);
3841     target_sreg disp = assemble_17(insn);
3842 
3843     return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3844 }
3845 
/* GATE: branch that may promote privilege when it lands on a gateway
   page.  The promotion is folded into the low bits of the target IAOQ.  */
static DisasJumpType trans_b_gate(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);         /* nullify delay slot */
    unsigned link = extract32(insn, 21, 5);     /* link register */
    target_sreg disp = assemble_17(insn);
    target_ureg dest = iaoq_dest(ctx, disp);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            return gen_excp(ctx, EXCP_ITLB_MISS);
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        /* Code translation disabled: run at the most privileged level.  */
        dest &= -4;  /* priv = 0 */
    }
#endif

    return do_dbranch(ctx, dest, link, n);
}
3892 
3893 static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
3894                                    const DisasInsn *di)
3895 {
3896     unsigned n = extract32(insn, 1, 1);
3897     target_sreg disp = assemble_22(insn);
3898 
3899     return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3900 }
3901 
3902 static DisasJumpType trans_blr(DisasContext *ctx, uint32_t insn,
3903                                const DisasInsn *di)
3904 {
3905     unsigned n = extract32(insn, 1, 1);
3906     unsigned rx = extract32(insn, 16, 5);
3907     unsigned link = extract32(insn, 21, 5);
3908     TCGv_reg tmp = get_temp(ctx);
3909 
3910     tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3911     tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3912     /* The computation here never changes privilege level.  */
3913     return do_ibranch(ctx, tmp, link, n);
3914 }
3915 
3916 static DisasJumpType trans_bv(DisasContext *ctx, uint32_t insn,
3917                               const DisasInsn *di)
3918 {
3919     unsigned n = extract32(insn, 1, 1);
3920     unsigned rx = extract32(insn, 16, 5);
3921     unsigned rb = extract32(insn, 21, 5);
3922     TCGv_reg dest;
3923 
3924     if (rx == 0) {
3925         dest = load_gpr(ctx, rb);
3926     } else {
3927         dest = get_temp(ctx);
3928         tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3929         tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3930     }
3931     dest = do_ibranch_priv(ctx, dest);
3932     return do_ibranch(ctx, dest, 0, n);
3933 }
3934 
/* BVE: branch vectored external — indirect branch through GR[rb] with
   the target space derived from the offset.  Bit 13 selects the linking
   form (link in GR2).  */
static DisasJumpType trans_bve(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);         /* nullify delay slot */
    unsigned rb = extract32(insn, 21, 5);       /* base register */
    unsigned link = extract32(insn, 13, 1) ? 2 : 0;
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    /* No spaces in user mode: treat as a plain indirect branch.  */
    dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
    return do_ibranch(ctx, dest, link, n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));

    /* Advance the queue: old back becomes front, target becomes back.  */
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (link) {
        copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, n);
    tcg_gen_lookup_and_goto_ptr();
    return nullify_end(ctx, DISAS_NORETURN);
#endif
}
3964 
/* Major opcode 0x3A: unconditional branch instructions.
   Entries are { match value, mask, translator }.  */
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
    { 0xe8002000u, 0xfc00e000u, trans_b_gate },
};
3973 
3974 static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3975                                       const DisasInsn *di)
3976 {
3977     unsigned rt = extract32(insn, 0, 5);
3978     unsigned ra = extract32(insn, 21, 5);
3979     return do_fop_wew(ctx, rt, ra, di->f.wew);
3980 }
3981 
3982 static DisasJumpType trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3983                                       const DisasInsn *di)
3984 {
3985     unsigned rt = assemble_rt64(insn);
3986     unsigned ra = assemble_ra64(insn);
3987     return do_fop_wew(ctx, rt, ra, di->f.wew);
3988 }
3989 
3990 static DisasJumpType trans_fop_ded(DisasContext *ctx, uint32_t insn,
3991                                    const DisasInsn *di)
3992 {
3993     unsigned rt = extract32(insn, 0, 5);
3994     unsigned ra = extract32(insn, 21, 5);
3995     return do_fop_ded(ctx, rt, ra, di->f.ded);
3996 }
3997 
3998 static DisasJumpType trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3999                                       const DisasInsn *di)
4000 {
4001     unsigned rt = extract32(insn, 0, 5);
4002     unsigned ra = extract32(insn, 21, 5);
4003     return do_fop_wed(ctx, rt, ra, di->f.wed);
4004 }
4005 
4006 static DisasJumpType trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
4007                                       const DisasInsn *di)
4008 {
4009     unsigned rt = assemble_rt64(insn);
4010     unsigned ra = extract32(insn, 21, 5);
4011     return do_fop_wed(ctx, rt, ra, di->f.wed);
4012 }
4013 
4014 static DisasJumpType trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
4015                                       const DisasInsn *di)
4016 {
4017     unsigned rt = extract32(insn, 0, 5);
4018     unsigned ra = extract32(insn, 21, 5);
4019     return do_fop_dew(ctx, rt, ra, di->f.dew);
4020 }
4021 
4022 static DisasJumpType trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
4023                                       const DisasInsn *di)
4024 {
4025     unsigned rt = extract32(insn, 0, 5);
4026     unsigned ra = assemble_ra64(insn);
4027     return do_fop_dew(ctx, rt, ra, di->f.dew);
4028 }
4029 
4030 static DisasJumpType trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
4031                                        const DisasInsn *di)
4032 {
4033     unsigned rt = extract32(insn, 0, 5);
4034     unsigned rb = extract32(insn, 16, 5);
4035     unsigned ra = extract32(insn, 21, 5);
4036     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4037 }
4038 
4039 static DisasJumpType trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
4040                                        const DisasInsn *di)
4041 {
4042     unsigned rt = assemble_rt64(insn);
4043     unsigned rb = assemble_rb64(insn);
4044     unsigned ra = assemble_ra64(insn);
4045     return do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4046 }
4047 
4048 static DisasJumpType trans_fop_dedd(DisasContext *ctx, uint32_t insn,
4049                                     const DisasInsn *di)
4050 {
4051     unsigned rt = extract32(insn, 0, 5);
4052     unsigned rb = extract32(insn, 16, 5);
4053     unsigned ra = extract32(insn, 21, 5);
4054     return do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
4055 }
4056 
/* FCPY,sgl: copy a single-precision value unchanged; env is unused.  */
static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
4061 
/* FCPY,dbl: copy a double-precision value unchanged; env is unused.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}
4066 
/* FABS,sgl: clear the sign bit (bit 31) by masking with INT32_MAX.  */
static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}
4071 
/* FABS,dbl: clear the sign bit (bit 63) by masking with INT64_MAX.  */
static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}
4076 
/* FNEG,sgl: flip the sign bit by xoring with INT32_MIN.  */
static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}
4081 
/* FNEG,dbl: flip the sign bit by xoring with INT64_MIN.  */
static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}
4086 
/* FNEGABS,sgl: force the sign bit on, i.e. -|x|, by oring INT32_MIN.  */
static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}
4091 
/* FNEGABS,dbl: force the sign bit on, i.e. -|x|, by oring INT64_MIN.  */
static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}
4096 
/* Shared body of single-precision FCMP: load both operands and hand
   them, plus the condition C and queue field Y, to the fcmp_s helper,
   which updates the FP status recorded in env.  */
static DisasJumpType do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
                               unsigned y, unsigned c)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(ra);
    tb = load_frw0_i32(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
4118 
4119 static DisasJumpType trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
4120                                      const DisasInsn *di)
4121 {
4122     unsigned c = extract32(insn, 0, 5);
4123     unsigned y = extract32(insn, 13, 3);
4124     unsigned rb = extract32(insn, 16, 5);
4125     unsigned ra = extract32(insn, 21, 5);
4126     return do_fcmp_s(ctx, ra, rb, y, c);
4127 }
4128 
4129 static DisasJumpType trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
4130                                      const DisasInsn *di)
4131 {
4132     unsigned c = extract32(insn, 0, 5);
4133     unsigned y = extract32(insn, 13, 3);
4134     unsigned rb = assemble_rb64(insn);
4135     unsigned ra = assemble_ra64(insn);
4136     return do_fcmp_s(ctx, ra, rb, y, c);
4137 }
4138 
/* FCMP,dbl: double-precision compare.  Operands go to the fcmp_d
   helper along with the condition C and queue field Y; the helper
   updates the FP status in env.  */
static DisasJumpType trans_fcmp_d(DisasContext *ctx, uint32_t insn,
                                  const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);         /* compare condition */
    unsigned y = extract32(insn, 13, 3);        /* queue field */
    unsigned rb = extract32(insn, 16, 5);
    unsigned ra = extract32(insn, 21, 5);
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(ra);
    tb = load_frd0(rb);
    ty = tcg_const_i32(y);
    tc = tcg_const_i32(c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx, DISAS_NEXT);
}
4165 
/* FTEST (single-condition form): test one bit of the shadow copy of the
   FP status register; nullify the following insn when it is set.  */
static DisasJumpType trans_ftest_t(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned y = extract32(insn, 13, 3);
    /* NOTE(review): for y == 1 this unsigned subtraction wraps and the
       later "21 - cbit" wraps back — confirm against the FTEST encoding.  */
    unsigned cbit = (y ^ 1) - 1;
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    /* Pick out bit (21 - cbit) of the FPSR shadow kept in env.  */
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
    tcg_gen_extract_reg(t, t, 21 - cbit, 1);
    ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    tcg_temp_free(t);

    return nullify_end(ctx, DISAS_NEXT);
}
4183 
/* FTEST (queue form): test combinations of FP status/queue bits from
   the FPSR shadow in env, per the 5-bit condition C.  The "rej" forms
   invert the sense of the corresponding "acc" masks.  */
static DisasJumpType trans_ftest_q(DisasContext *ctx, uint32_t insn,
                                   const DisasInsn *di)
{
    unsigned c = extract32(insn, 0, 5);
    int mask;
    bool inv = false;
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    /* Each case selects a mask of shadow-FPSR bits to test;
       the magic constants are bit masks within that register.  */
    switch (c) {
    case 0: /* simple */
        tcg_gen_andi_reg(t, t, 0x4000000);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        goto done;
    case 2: /* rej */
        inv = true;
        /* fallthru */
    case 1: /* acc */
        mask = 0x43ff800;
        break;
    case 6: /* rej8 */
        inv = true;
        /* fallthru */
    case 5: /* acc8 */
        mask = 0x43f8000;
        break;
    case 9: /* acc6 */
        mask = 0x43e0000;
        break;
    case 13: /* acc4 */
        mask = 0x4380000;
        break;
    case 17: /* acc2 */
        mask = 0x4200000;
        break;
    default:
        return gen_illegal(ctx);
    }
    if (inv) {
        /* Inverted sense: nullify when all masked bits are set.  */
        TCGv_reg c = load_const(ctx, mask);
        tcg_gen_or_reg(t, t, c);
        ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
    } else {
        /* Normal sense: nullify when all masked bits are clear.  */
        tcg_gen_andi_reg(t, t, mask);
        ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
    }
 done:
    return nullify_end(ctx, DISAS_NEXT);
}
4237 
/* XMPYU: multiply two values loaded via load_frw0_i64 as 64-bit
   integers and store the 64-bit product in the double FP register rt.
   (Unsigned 32x32->64 semantics rely on load_frw0_i64 zero-extending —
   see its definition.)  */
static DisasJumpType trans_xmpyu(DisasContext *ctx, uint32_t insn,
                                 const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rb = assemble_rb64(insn);
    unsigned ra = assemble_ra64(insn);
    TCGv_i64 a, b;

    nullify_over(ctx);

    a = load_frw0_i64(ra);
    b = load_frw0_i64(rb);
    tcg_gen_mul_i64(a, a, b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);

    return nullify_end(ctx, DISAS_NEXT);
}
4257 
/* Table-entry shorthand: pair a decode translator with the union member
   that receives the operation-specific generator function.  */
#define FOP_DED  trans_fop_ded, .f.ded
#define FOP_DEDD trans_fop_dedd, .f.dedd

#define FOP_WEW  trans_fop_wew_0c, .f.wew
#define FOP_DEW  trans_fop_dew_0c, .f.dew
#define FOP_WED  trans_fop_wed_0c, .f.wed
#define FOP_WEWW trans_fop_weww_0c, .f.weww
4265 
/* Major opcode 0x0C: floating-point operations, "0C" format.
   Entries are { match value, mask, translator + generator }.  */
static const DisasInsn table_float_0c[] = {
    /* floating point class zero */
    { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
    { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
    { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
    { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
    { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },

    { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
    { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
    { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
    { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },

    { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    /* floating point class one */
    /* float/float */
    { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
    { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
    { 0x30002420, 0xffffffe0, trans_ftest_q },
    { 0x30000420, 0xffff1fff, trans_ftest_t },

    /* FID.  Note that ra == rt == 0, which via fcpy puts 0 into fr0.
       This is machine/revision == 0, which is reserved for simulator.  */
    { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
};
4338 
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
/* Rebind the single-word shorthands to the 0E-format decoders for
   the second FP table; the double forms are shared and stay bound.  */
#define FOP_WEW  trans_fop_wew_0e, .f.wew
#define FOP_DEW  trans_fop_dew_0e, .f.dew
#define FOP_WED  trans_fop_wed_0e, .f.wed
#define FOP_WEWW trans_fop_weww_0e, .f.weww
4347 
/* Major opcode 0x0E: floating-point operations, "0E" format (adds the
   6-bit single-word register addressing).  Entries are
   { match value, mask, translator + generator }.  */
static const DisasInsn table_float_0e[] = {
    /* floating point class zero */
    { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
    { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
    { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
    { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
    { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
    { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },

    { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
    { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
    { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
    { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
    { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
    { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },

    /* floating point class three */
    { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
    { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
    { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
    { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },

    { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
    { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
    { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
    { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },

    { 0x38004700, 0xfc00ef60, trans_xmpyu },

    /* floating point class one */
    /* float/float */
    { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
    { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
    /* int/float */
    { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
    { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
    { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
    { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
    /* float/int */
    { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
    { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
    { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
    { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
    /* float/int truncate */
    { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
    { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
    { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
    { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
    /* uint/float */
    { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
    { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
    { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
    { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
    /* float/uint */
    { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
    { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
    { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
    { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
    /* float/uint truncate */
    { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
    { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
    { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
    { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },

    /* floating point class two */
    { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
    { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
};
4416 
/* The FP dispatch tables are complete; retire the shorthands.  */
#undef FOP_WEW
#undef FOP_DEW
#undef FOP_WED
#undef FOP_WEWW
#undef FOP_DED
#undef FOP_DEDD
4423 
/* Convert the fmpyadd single-precision register encodings to standard:
   bit 4 selects the upper bank of 16 (+32), the low four bits index
   within the bank, and everything is offset by 16.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    int bank = (r & 16) ? 32 : 0;
    return bank + 16 + (r & 15);
}
4429 
/* FMPYADD / FMPYSUB: one multiply and one add (or subtract) issued as a
   single instruction.  f selects precision (0 = single, else double);
   single-precision register fields use the packed fmpyadd encoding.  */
static DisasJumpType trans_fmpyadd(DisasContext *ctx,
                                   uint32_t insn, bool is_sub)
{
    unsigned tm = extract32(insn, 0, 5);        /* multiply target */
    unsigned f = extract32(insn, 5, 1);         /* precision select */
    unsigned ra = extract32(insn, 6, 5);        /* add/sub operand */
    unsigned ta = extract32(insn, 11, 5);       /* add/sub target (and operand) */
    unsigned rm2 = extract32(insn, 16, 5);      /* multiply operand 2 */
    unsigned rm1 = extract32(insn, 21, 5);      /* multiply operand 1 */

    nullify_over(ctx);

    /* Independent multiply & add/sub, with undefined behaviour
       if outputs overlap inputs.  */
    if (f == 0) {
        tm = fmpyadd_s_reg(tm);
        ra = fmpyadd_s_reg(ra);
        ta = fmpyadd_s_reg(ta);
        rm2 = fmpyadd_s_reg(rm2);
        rm1 = fmpyadd_s_reg(rm1);
        do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
        do_fop_weww(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
    } else {
        do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
        do_fop_dedd(ctx, ta, ta, ra,
                    is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
    }

    return nullify_end(ctx, DISAS_NEXT);
}
4461 
/* FMPYFADD,sgl: fused multiply-add rm1 * rm2 + ra3 (negated multiply
   when the neg bit is set), single precision.  */
static DisasJumpType trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned neg = extract32(insn, 5, 1);       /* 1: FMPYNFADD */
    unsigned rm1 = assemble_ra64(insn);
    unsigned rm2 = assemble_rb64(insn);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i32 a, b, c;

    nullify_over(ctx);
    a = load_frw0_i32(rm1);
    b = load_frw0_i32(rm2);
    c = load_frw0_i32(ra3);

    /* The helpers write the fused result back into a.  */
    if (neg) {
        gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i32(b);
    tcg_temp_free_i32(c);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx, DISAS_NEXT);
}
4489 
/* FMPYFADD,dbl: fused multiply-add rm1 * rm2 + ra3 (negated multiply
   when the neg bit is set), double precision.  */
static DisasJumpType trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
                                      const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned neg = extract32(insn, 5, 1);       /* 1: FMPYNFADD */
    unsigned rm1 = extract32(insn, 21, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i64 a, b, c;

    nullify_over(ctx);
    a = load_frd0(rm1);
    b = load_frd0(rm2);
    c = load_frd0(ra3);

    /* The helpers write the fused result back into a.  */
    if (neg) {
        gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx, DISAS_NEXT);
}
4517 
/* Fused multiply-add instructions (major opcode 0x2E).
   Entries are { match value, mask, translator }.  */
static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};
4522 
4523 static DisasJumpType translate_table_int(DisasContext *ctx, uint32_t insn,
4524                                          const DisasInsn table[], size_t n)
4525 {
4526     size_t i;
4527     for (i = 0; i < n; ++i) {
4528         if ((insn & table[i].mask) == table[i].insn) {
4529             return table[i].trans(ctx, insn, &table[i]);
4530         }
4531     }
4532     qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
4533                   insn, ctx->base.pc_next);
4534     return gen_illegal(ctx);
4535 }
4536 
/* Convenience wrapper: dispatch INSN through TABLE, supplying the
   element count automatically (TABLE must be an actual array).  */
#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
4539 
/* Top-level instruction decode: switch on the 6-bit major opcode
   (bits 31:26) and either handle the insn directly or defer to a
   secondary decode table.  Unhandled opcodes fall through to an
   illegal-instruction exception.  */
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x00: /* system op */
        return translate_table(ctx, insn, table_system);
    case 0x01:
        return translate_table(ctx, insn, table_mem_mgmt);
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x06:
        return trans_fmpyadd(ctx, insn, false);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0C:
        return translate_table(ctx, insn, table_float_0c);
    case 0x0D:
        return trans_ldo(ctx, insn);
    case 0x0E:
        return translate_table(ctx, insn, table_float_0e);

    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        /* modify-bit variant of the word load */
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        /* modify-bit variant of the word store */
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    /* Compare-and-branch family: flags select is_true / imm / dw.  */
    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x26:
        return trans_fmpyadd(ctx, insn, true);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2E:
        return translate_table(ctx, insn, table_fp_fused);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
        break;
    case 0x3F:
#ifndef CONFIG_USER_ONLY
        /* Unassigned, but use as system-halt.  */
        if (insn == 0xfffdead0) {
            return gen_hlt(ctx, 0); /* halt system */
        }
        if (insn == 0xfffdead1) {
            return gen_hlt(ctx, 1); /* reset system */
        }
#endif
        break;
    default:
        break;
    }
    return gen_illegal(ctx);
}
4671 
/* TranslatorOps hook: set up the per-TB DisasContext.  Recovers the
   privilege level, MMU index, and both instruction-address-queue words
   from TB->FLAGS / pc_first / cs_base, then bounds the TB to the
   current page.  */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    /* User-only: fixed privilege; the low bits of the queue addresses
       carry the (constant) privilege level.  */
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
#else
    /* System: privilege comes from TB flags; use the physical MMU index
       when data translation (PSW_D) is off.  */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    /* Low 32 bits of cs_base hold the signed IAOQ_B - IAOQ_F offset;
       zero means IAOQ_B is unknown (variable).  */
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    /* -1 is the "not known at translate time" sentinel for queue words.  */
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    /* Per-insn temporary tracking; freed in hppa_tr_translate_insn.  */
    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}
4709 
4710 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4711 {
4712     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4713 
4714     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4715     ctx->null_cond = cond_make_f();
4716     ctx->psw_n_nonzero = false;
4717     if (ctx->tb_flags & PSW_N) {
4718         ctx->null_cond.c = TCG_COND_ALWAYS;
4719         ctx->psw_n_nonzero = true;
4720     }
4721     ctx->null_lab = NULL;
4722 }
4723 
4724 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4725 {
4726     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4727 
4728     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4729 }
4730 
/* TranslatorOps hook: a guest breakpoint was hit at the current insn.
   Raise EXCP_DEBUG instead of translating it; pc_next is still advanced
   by the insn size so the generic loop accounts for the insn.  */
static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
    ctx->base.pc_next += 4;
    return true;    /* breakpoint handled; stop translating this insn */
}
4740 
/* TranslatorOps hook: translate a single insn.  Handles nullification,
   maintains the two-word instruction address queue (iaoq_f/iaoq_b, with
   -1 meaning "not a compile-time constant"), and releases the per-insn
   TCG temporaries afterwards.  */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        /* The user-only "page zero" holds emulated syscall gateways.  */
        ret = do_page_zero(ctx);
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            /* Back of queue is only known at runtime: compute it into
               a temp from the cpu_iaoq_b global.  */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Insn is unconditionally nullified: skip translation.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            ret = translate_one(ctx, insn);
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Constant non-sequential queue: chain directly to it.  */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ret = DISAS_NORETURN;
        } else {
            ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.is_jmp = ret;
    ctx->base.pc_next += 4;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        /* Front of queue is now variable: shift the globals forward.  */
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}
4827 
/* TranslatorOps hook: emit the TB epilogue appropriate to how the
   instruction loop ended.  */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        /* TB already ended with an exception or a goto_tb chain.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* Flush the translate-time queue state back to the globals
           before leaving the TB.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            /* Force a return to the main loop (e.g. PSW changed).  */
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
4856 
4857 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4858 {
4859     target_ulong pc = dcbase->pc_first;
4860 
4861 #ifdef CONFIG_USER_ONLY
4862     switch (pc) {
4863     case 0x00:
4864         qemu_log("IN:\n0x00000000:  (null)\n");
4865         return;
4866     case 0xb0:
4867         qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
4868         return;
4869     case 0xe0:
4870         qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4871         return;
4872     case 0x100:
4873         qemu_log("IN:\n0x00000100:  syscall\n");
4874         return;
4875     }
4876 #endif
4877 
4878     qemu_log("IN: %s\n", lookup_symbol(pc));
4879     log_target_disas(cs, pc, dcbase->tb->size);
4880 }
4881 
/* Callback table consumed by the generic translator_loop.  */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
4891 
4892 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4893 
4894 {
4895     DisasContext ctx;
4896     translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
4897 }
4898 
4899 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4900                           target_ulong *data)
4901 {
4902     env->iaoq_f = data[0];
4903     if (data[1] != (target_ureg)-1) {
4904         env->iaoq_b = data[1];
4905     }
4906     /* Since we were executing the instruction at IAOQ_F, and took some
4907        sort of action that provoked the cpu_restore_state, we can infer
4908        that the instruction was not nullified.  */
4909     env->psw_n = 0;
4910 }
4911