xref: /openbmc/qemu/target/hppa/translate.c (revision d53106c997e5c8e61e37ae9ff9f0e1f243b03968)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
34 #undef  HELPER_H
35 
36 
37 /* Since we have a distinction between register size and address size,
38    we need to redefine all of these.  */
39 
40 #undef TCGv
41 #undef tcg_temp_new
42 #undef tcg_global_mem_new
43 
44 #if TARGET_LONG_BITS == 64
45 #define TCGv_tl              TCGv_i64
46 #define tcg_temp_new_tl      tcg_temp_new_i64
47 #if TARGET_REGISTER_BITS == 64
48 #define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
49 #else
50 #define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
51 #endif
52 #else
53 #define TCGv_tl              TCGv_i32
54 #define tcg_temp_new_tl      tcg_temp_new_i32
55 #define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
56 #endif
57 
58 #if TARGET_REGISTER_BITS == 64
59 #define TCGv_reg             TCGv_i64
60 
61 #define tcg_temp_new         tcg_temp_new_i64
62 #define tcg_global_mem_new   tcg_global_mem_new_i64
63 
64 #define tcg_gen_movi_reg     tcg_gen_movi_i64
65 #define tcg_gen_mov_reg      tcg_gen_mov_i64
66 #define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
67 #define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
68 #define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
69 #define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
70 #define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
71 #define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
72 #define tcg_gen_ld_reg       tcg_gen_ld_i64
73 #define tcg_gen_st8_reg      tcg_gen_st8_i64
74 #define tcg_gen_st16_reg     tcg_gen_st16_i64
75 #define tcg_gen_st32_reg     tcg_gen_st32_i64
76 #define tcg_gen_st_reg       tcg_gen_st_i64
77 #define tcg_gen_add_reg      tcg_gen_add_i64
78 #define tcg_gen_addi_reg     tcg_gen_addi_i64
79 #define tcg_gen_sub_reg      tcg_gen_sub_i64
80 #define tcg_gen_neg_reg      tcg_gen_neg_i64
81 #define tcg_gen_subfi_reg    tcg_gen_subfi_i64
82 #define tcg_gen_subi_reg     tcg_gen_subi_i64
83 #define tcg_gen_and_reg      tcg_gen_and_i64
84 #define tcg_gen_andi_reg     tcg_gen_andi_i64
85 #define tcg_gen_or_reg       tcg_gen_or_i64
86 #define tcg_gen_ori_reg      tcg_gen_ori_i64
87 #define tcg_gen_xor_reg      tcg_gen_xor_i64
88 #define tcg_gen_xori_reg     tcg_gen_xori_i64
89 #define tcg_gen_not_reg      tcg_gen_not_i64
90 #define tcg_gen_shl_reg      tcg_gen_shl_i64
91 #define tcg_gen_shli_reg     tcg_gen_shli_i64
92 #define tcg_gen_shr_reg      tcg_gen_shr_i64
93 #define tcg_gen_shri_reg     tcg_gen_shri_i64
94 #define tcg_gen_sar_reg      tcg_gen_sar_i64
95 #define tcg_gen_sari_reg     tcg_gen_sari_i64
96 #define tcg_gen_brcond_reg   tcg_gen_brcond_i64
97 #define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
98 #define tcg_gen_setcond_reg  tcg_gen_setcond_i64
99 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
100 #define tcg_gen_mul_reg      tcg_gen_mul_i64
101 #define tcg_gen_muli_reg     tcg_gen_muli_i64
102 #define tcg_gen_div_reg      tcg_gen_div_i64
103 #define tcg_gen_rem_reg      tcg_gen_rem_i64
104 #define tcg_gen_divu_reg     tcg_gen_divu_i64
105 #define tcg_gen_remu_reg     tcg_gen_remu_i64
106 #define tcg_gen_discard_reg  tcg_gen_discard_i64
107 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
108 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
109 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
110 #define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
111 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
112 #define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
113 #define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
114 #define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
115 #define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
116 #define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
117 #define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
118 #define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
119 #define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
120 #define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
121 #define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
122 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
123 #define tcg_gen_andc_reg     tcg_gen_andc_i64
124 #define tcg_gen_eqv_reg      tcg_gen_eqv_i64
125 #define tcg_gen_nand_reg     tcg_gen_nand_i64
126 #define tcg_gen_nor_reg      tcg_gen_nor_i64
127 #define tcg_gen_orc_reg      tcg_gen_orc_i64
128 #define tcg_gen_clz_reg      tcg_gen_clz_i64
129 #define tcg_gen_ctz_reg      tcg_gen_ctz_i64
130 #define tcg_gen_clzi_reg     tcg_gen_clzi_i64
131 #define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
132 #define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
133 #define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
134 #define tcg_gen_rotl_reg     tcg_gen_rotl_i64
135 #define tcg_gen_rotli_reg    tcg_gen_rotli_i64
136 #define tcg_gen_rotr_reg     tcg_gen_rotr_i64
137 #define tcg_gen_rotri_reg    tcg_gen_rotri_i64
138 #define tcg_gen_deposit_reg  tcg_gen_deposit_i64
139 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
140 #define tcg_gen_extract_reg  tcg_gen_extract_i64
141 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
142 #define tcg_gen_extract2_reg tcg_gen_extract2_i64
143 #define tcg_constant_reg     tcg_constant_i64
144 #define tcg_gen_movcond_reg  tcg_gen_movcond_i64
145 #define tcg_gen_add2_reg     tcg_gen_add2_i64
146 #define tcg_gen_sub2_reg     tcg_gen_sub2_i64
147 #define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
148 #define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
149 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
150 #define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
151 #else
152 #define TCGv_reg             TCGv_i32
153 #define tcg_temp_new         tcg_temp_new_i32
154 #define tcg_global_mem_new   tcg_global_mem_new_i32
155 
156 #define tcg_gen_movi_reg     tcg_gen_movi_i32
157 #define tcg_gen_mov_reg      tcg_gen_mov_i32
158 #define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
159 #define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
160 #define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
161 #define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
162 #define tcg_gen_ld32u_reg    tcg_gen_ld_i32
163 #define tcg_gen_ld32s_reg    tcg_gen_ld_i32
164 #define tcg_gen_ld_reg       tcg_gen_ld_i32
165 #define tcg_gen_st8_reg      tcg_gen_st8_i32
166 #define tcg_gen_st16_reg     tcg_gen_st16_i32
167 #define tcg_gen_st32_reg     tcg_gen_st32_i32
168 #define tcg_gen_st_reg       tcg_gen_st_i32
169 #define tcg_gen_add_reg      tcg_gen_add_i32
170 #define tcg_gen_addi_reg     tcg_gen_addi_i32
171 #define tcg_gen_sub_reg      tcg_gen_sub_i32
172 #define tcg_gen_neg_reg      tcg_gen_neg_i32
173 #define tcg_gen_subfi_reg    tcg_gen_subfi_i32
174 #define tcg_gen_subi_reg     tcg_gen_subi_i32
175 #define tcg_gen_and_reg      tcg_gen_and_i32
176 #define tcg_gen_andi_reg     tcg_gen_andi_i32
177 #define tcg_gen_or_reg       tcg_gen_or_i32
178 #define tcg_gen_ori_reg      tcg_gen_ori_i32
179 #define tcg_gen_xor_reg      tcg_gen_xor_i32
180 #define tcg_gen_xori_reg     tcg_gen_xori_i32
181 #define tcg_gen_not_reg      tcg_gen_not_i32
182 #define tcg_gen_shl_reg      tcg_gen_shl_i32
183 #define tcg_gen_shli_reg     tcg_gen_shli_i32
184 #define tcg_gen_shr_reg      tcg_gen_shr_i32
185 #define tcg_gen_shri_reg     tcg_gen_shri_i32
186 #define tcg_gen_sar_reg      tcg_gen_sar_i32
187 #define tcg_gen_sari_reg     tcg_gen_sari_i32
188 #define tcg_gen_brcond_reg   tcg_gen_brcond_i32
189 #define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
190 #define tcg_gen_setcond_reg  tcg_gen_setcond_i32
191 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
192 #define tcg_gen_mul_reg      tcg_gen_mul_i32
193 #define tcg_gen_muli_reg     tcg_gen_muli_i32
194 #define tcg_gen_div_reg      tcg_gen_div_i32
195 #define tcg_gen_rem_reg      tcg_gen_rem_i32
196 #define tcg_gen_divu_reg     tcg_gen_divu_i32
197 #define tcg_gen_remu_reg     tcg_gen_remu_i32
198 #define tcg_gen_discard_reg  tcg_gen_discard_i32
199 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
200 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
201 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
202 #define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
203 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
204 #define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
205 #define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
206 #define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
207 #define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
208 #define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
209 #define tcg_gen_ext32u_reg   tcg_gen_mov_i32
210 #define tcg_gen_ext32s_reg   tcg_gen_mov_i32
211 #define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
212 #define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
213 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
214 #define tcg_gen_andc_reg     tcg_gen_andc_i32
215 #define tcg_gen_eqv_reg      tcg_gen_eqv_i32
216 #define tcg_gen_nand_reg     tcg_gen_nand_i32
217 #define tcg_gen_nor_reg      tcg_gen_nor_i32
218 #define tcg_gen_orc_reg      tcg_gen_orc_i32
219 #define tcg_gen_clz_reg      tcg_gen_clz_i32
220 #define tcg_gen_ctz_reg      tcg_gen_ctz_i32
221 #define tcg_gen_clzi_reg     tcg_gen_clzi_i32
222 #define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
223 #define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
224 #define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
225 #define tcg_gen_rotl_reg     tcg_gen_rotl_i32
226 #define tcg_gen_rotli_reg    tcg_gen_rotli_i32
227 #define tcg_gen_rotr_reg     tcg_gen_rotr_i32
228 #define tcg_gen_rotri_reg    tcg_gen_rotri_i32
229 #define tcg_gen_deposit_reg  tcg_gen_deposit_i32
230 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
231 #define tcg_gen_extract_reg  tcg_gen_extract_i32
232 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
233 #define tcg_gen_extract2_reg tcg_gen_extract2_i32
234 #define tcg_constant_reg     tcg_constant_i32
235 #define tcg_gen_movcond_reg  tcg_gen_movcond_i32
236 #define tcg_gen_add2_reg     tcg_gen_add2_i32
237 #define tcg_gen_sub2_reg     tcg_gen_sub2_i32
238 #define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
239 #define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
240 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
241 #define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
242 #endif /* TARGET_REGISTER_BITS */
243 
244 typedef struct DisasCond {
245     TCGCond c;
246     TCGv_reg a0, a1;
247 } DisasCond;
248 
249 typedef struct DisasContext {
250     DisasContextBase base;
251     CPUState *cs;
252 
253     target_ureg iaoq_f;
254     target_ureg iaoq_b;
255     target_ureg iaoq_n;
256     TCGv_reg iaoq_n_var;
257 
258     int ntempr, ntempl;
259     TCGv_reg tempr[8];
260     TCGv_tl  templ[4];
261 
262     DisasCond null_cond;
263     TCGLabel *null_lab;
264 
265     uint32_t insn;
266     uint32_t tb_flags;
267     int mmu_idx;
268     int privilege;
269     bool psw_n_nonzero;
270 
271 #ifdef CONFIG_USER_ONLY
272     MemOp unalign;
273 #endif
274 } DisasContext;
275 
276 #ifdef CONFIG_USER_ONLY
277 #define UNALIGN(C)  (C)->unalign
278 #else
279 #define UNALIGN(C)  MO_ALIGN
280 #endif
281 
282 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
283 static int expand_sm_imm(DisasContext *ctx, int val)
284 {
285     if (val & PSW_SM_E) {
286         val = (val & ~PSW_SM_E) | PSW_E;
287     }
288     if (val & PSW_SM_W) {
289         val = (val & ~PSW_SM_W) | PSW_W;
290     }
291     return val;
292 }
293 
/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    /* The field is stored bit-inverted in the insn; undo that here.  */
    return ~val;
}
299 
300 /* Convert the M:A bits within a memory insn to the tri-state value
301    we use for the final M.  */
302 static int ma_to_m(DisasContext *ctx, int val)
303 {
304     return val & 2 ? (val & 1 ? -1 : 1) : 0;
305 }
306 
307 /* Convert the sign of the displacement to a pre or post-modify.  */
308 static int pos_to_m(DisasContext *ctx, int val)
309 {
310     return val ? 1 : -1;
311 }
312 
313 static int neg_to_m(DisasContext *ctx, int val)
314 {
315     return val ? -1 : 1;
316 }
317 
/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    /* Field encodes a word-aligned value; low 2 bits are implied zero.  */
    return val << 2;
}
323 
/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    /* Field encodes a doubleword-aligned value; low 3 bits implied zero.  */
    return val << 3;
}
329 
/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    /* Position the field within the 21-bit immediate being assembled.  */
    return val << 11;
}
335 
336 
337 /* Include the auto-generated decoder.  */
338 #include "decode-insns.c.inc"
339 
340 /* We are not using a goto_tb (for whatever reason), but have updated
341    the iaq (for whatever reason), so don't do it again on exit.  */
342 #define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0
343 
344 /* We are exiting the TB, but have neither emitted a goto_tb, nor
345    updated the iaq for the next instruction to be executed.  */
346 #define DISAS_IAQ_N_STALE    DISAS_TARGET_1
347 
348 /* Similarly, but we want to return to the main loop immediately
349    to recognize unmasked interrupts.  */
350 #define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
351 #define DISAS_EXIT                  DISAS_TARGET_3
352 
353 /* global register indexes */
354 static TCGv_reg cpu_gr[32];
355 static TCGv_i64 cpu_sr[4];
356 static TCGv_i64 cpu_srH;
357 static TCGv_reg cpu_iaoq_f;
358 static TCGv_reg cpu_iaoq_b;
359 static TCGv_i64 cpu_iasq_f;
360 static TCGv_i64 cpu_iasq_b;
361 static TCGv_reg cpu_sar;
362 static TCGv_reg cpu_psw_n;
363 static TCGv_reg cpu_psw_v;
364 static TCGv_reg cpu_psw_cb;
365 static TCGv_reg cpu_psw_cb_msb;
366 
367 #include "exec/gen-icount.h"
368 
void hppa_translate_init(void)
{
    /*
     * Create the TCG globals that back CPUHPPAState fields used by the
     * translator: general registers, space registers, SAR, the PSW
     * condition state, and the instruction-address offset queue.
     */
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        /* SAR lives in the control-register file rather than its own field. */
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* GR0 is hard-wired to zero; no global is created for it.  */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    /* srH mirrors sr[4]; see the TB_FLAG_SR_SAME handling in load_spr. */
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    /* Space-register halves of the instruction address queue.  */
    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
427 
428 static DisasCond cond_make_f(void)
429 {
430     return (DisasCond){
431         .c = TCG_COND_NEVER,
432         .a0 = NULL,
433         .a1 = NULL,
434     };
435 }
436 
437 static DisasCond cond_make_t(void)
438 {
439     return (DisasCond){
440         .c = TCG_COND_ALWAYS,
441         .a0 = NULL,
442         .a1 = NULL,
443     };
444 }
445 
446 static DisasCond cond_make_n(void)
447 {
448     return (DisasCond){
449         .c = TCG_COND_NE,
450         .a0 = cpu_psw_n,
451         .a1 = tcg_constant_reg(0)
452     };
453 }
454 
455 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
456 {
457     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
458     return (DisasCond){
459         .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
460     };
461 }
462 
463 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
464 {
465     TCGv_reg tmp = tcg_temp_new();
466     tcg_gen_mov_reg(tmp, a0);
467     return cond_make_0_tmp(c, tmp);
468 }
469 
470 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
471 {
472     DisasCond r = { .c = c };
473 
474     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
475     r.a0 = tcg_temp_new();
476     tcg_gen_mov_reg(r.a0, a0);
477     r.a1 = tcg_temp_new();
478     tcg_gen_mov_reg(r.a1, a1);
479 
480     return r;
481 }
482 
483 static void cond_free(DisasCond *cond)
484 {
485     switch (cond->c) {
486     default:
487         cond->a0 = NULL;
488         cond->a1 = NULL;
489         /* fallthru */
490     case TCG_COND_ALWAYS:
491         cond->c = TCG_COND_NEVER;
492         break;
493     case TCG_COND_NEVER:
494         break;
495     }
496 }
497 
498 static TCGv_reg get_temp(DisasContext *ctx)
499 {
500     unsigned i = ctx->ntempr++;
501     g_assert(i < ARRAY_SIZE(ctx->tempr));
502     return ctx->tempr[i] = tcg_temp_new();
503 }
504 
505 #ifndef CONFIG_USER_ONLY
506 static TCGv_tl get_temp_tl(DisasContext *ctx)
507 {
508     unsigned i = ctx->ntempl++;
509     g_assert(i < ARRAY_SIZE(ctx->templ));
510     return ctx->templ[i] = tcg_temp_new_tl();
511 }
512 #endif
513 
514 static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
515 {
516     TCGv_reg t = get_temp(ctx);
517     tcg_gen_movi_reg(t, v);
518     return t;
519 }
520 
521 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
522 {
523     if (reg == 0) {
524         TCGv_reg t = get_temp(ctx);
525         tcg_gen_movi_reg(t, 0);
526         return t;
527     } else {
528         return cpu_gr[reg];
529     }
530 }
531 
532 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
533 {
534     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
535         return get_temp(ctx);
536     } else {
537         return cpu_gr[reg];
538     }
539 }
540 
541 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
542 {
543     if (ctx->null_cond.c != TCG_COND_NEVER) {
544         tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
545                             ctx->null_cond.a1, dest, t);
546     } else {
547         tcg_gen_mov_reg(dest, t);
548     }
549 }
550 
551 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
552 {
553     if (reg != 0) {
554         save_or_nullify(ctx, cpu_gr[reg], t);
555     }
556 }
557 
558 #if HOST_BIG_ENDIAN
559 # define HI_OFS  0
560 # define LO_OFS  4
561 #else
562 # define HI_OFS  4
563 # define LO_OFS  0
564 #endif
565 
566 static TCGv_i32 load_frw_i32(unsigned rt)
567 {
568     TCGv_i32 ret = tcg_temp_new_i32();
569     tcg_gen_ld_i32(ret, cpu_env,
570                    offsetof(CPUHPPAState, fr[rt & 31])
571                    + (rt & 32 ? LO_OFS : HI_OFS));
572     return ret;
573 }
574 
575 static TCGv_i32 load_frw0_i32(unsigned rt)
576 {
577     if (rt == 0) {
578         TCGv_i32 ret = tcg_temp_new_i32();
579         tcg_gen_movi_i32(ret, 0);
580         return ret;
581     } else {
582         return load_frw_i32(rt);
583     }
584 }
585 
586 static TCGv_i64 load_frw0_i64(unsigned rt)
587 {
588     TCGv_i64 ret = tcg_temp_new_i64();
589     if (rt == 0) {
590         tcg_gen_movi_i64(ret, 0);
591     } else {
592         tcg_gen_ld32u_i64(ret, cpu_env,
593                           offsetof(CPUHPPAState, fr[rt & 31])
594                           + (rt & 32 ? LO_OFS : HI_OFS));
595     }
596     return ret;
597 }
598 
599 static void save_frw_i32(unsigned rt, TCGv_i32 val)
600 {
601     tcg_gen_st_i32(val, cpu_env,
602                    offsetof(CPUHPPAState, fr[rt & 31])
603                    + (rt & 32 ? LO_OFS : HI_OFS));
604 }
605 
606 #undef HI_OFS
607 #undef LO_OFS
608 
609 static TCGv_i64 load_frd(unsigned rt)
610 {
611     TCGv_i64 ret = tcg_temp_new_i64();
612     tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
613     return ret;
614 }
615 
616 static TCGv_i64 load_frd0(unsigned rt)
617 {
618     if (rt == 0) {
619         TCGv_i64 ret = tcg_temp_new_i64();
620         tcg_gen_movi_i64(ret, 0);
621         return ret;
622     } else {
623         return load_frd(rt);
624     }
625 }
626 
627 static void save_frd(unsigned rt, TCGv_i64 val)
628 {
629     tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
630 }
631 
632 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
633 {
634 #ifdef CONFIG_USER_ONLY
635     tcg_gen_movi_i64(dest, 0);
636 #else
637     if (reg < 4) {
638         tcg_gen_mov_i64(dest, cpu_sr[reg]);
639     } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
640         tcg_gen_mov_i64(dest, cpu_srH);
641     } else {
642         tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
643     }
644 #endif
645 }
646 
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.
   Emits a branch to ctx->null_lab taken when null_cond holds; the
   matching label is emitted by nullify_end.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        /* Branch around the insn body when the condition holds.  */
        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
675 
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: ensure PSW[N] is zero if it might not be.  */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    /* If the condition is already "PSW[N] != 0", the global holds the
       right value and no setcond is needed.  */
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
692 
693 /* Set a PSW[N] to X.  The intention is that this is used immediately
694    before a goto_tb/exit_tb, so that there is no fallthru path to other
695    code within the TB.  Therefore we do not update psw_n_nonzero.  */
696 static void nullify_set(DisasContext *ctx, bool x)
697 {
698     if (ctx->psw_n_nonzero || x) {
699         tcg_gen_movi_reg(cpu_psw_n, x);
700     }
701 }
702 
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    /* A NORETURN status inside a nullified body only applies to the
       taken path; the skip path falls through, so resume normally.  */
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
741 
742 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
743 {
744     if (unlikely(ival == -1)) {
745         tcg_gen_mov_reg(dest, vval);
746     } else {
747         tcg_gen_movi_reg(dest, ival);
748     }
749 }
750 
751 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
752 {
753     return ctx->iaoq_f + disp + 8;
754 }
755 
/* Emit the helper call that raises EXCEPTION.  */
static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}
760 
/* Raise EXCEPTION after committing the current IAOQ and PSW[N] state,
   so the exception handler sees a consistent machine state.  */
static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
769 
/* Raise EXC, first recording the faulting insn word in CR[IIR] as the
   architecture requires.  Returns true for tail-calling from trans_*.  */
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}
778 
/* Raise an illegal-instruction exception for the current insn.  */
static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
783 
784 #ifdef CONFIG_USER_ONLY
785 #define CHECK_MOST_PRIVILEGED(EXCP) \
786     return gen_excp_iir(ctx, EXCP)
787 #else
788 #define CHECK_MOST_PRIVILEGED(EXCP) \
789     do {                                     \
790         if (ctx->privilege != 0) {           \
791             return gen_excp_iir(ctx, EXCP);  \
792         }                                    \
793     } while (0)
794 #endif
795 
/* True if a direct goto_tb to DEST is permitted from this TB.  */
static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
800 
801 /* If the next insn is to be nullified, and it's on the same page,
802    and we're not attempting to set a breakpoint on it, then we can
803    totally skip the nullified insn.  This avoids creating and
804    executing a TB that merely branches to the next TB.  */
805 static bool use_nullify_skip(DisasContext *ctx)
806 {
807     return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
808             && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
809 }
810 
/* End the TB with a jump to the insn pair (F, B).  Use a chained
   goto_tb when both addresses are known and chaining is allowed;
   otherwise fall back to an indirect TB lookup.  */
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        /* -1 marks an address only known at runtime; copy_iaoq_entry
           selects the variable source in that case.  */
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}
825 
/* True if arithmetic condition C requires the signed-overflow value.  */
static bool cond_need_sv(int c)
{
    switch (c) {
    case 2:
    case 3:
    case 6:
        return true;
    default:
        return false;
    }
}
830 
/* True if arithmetic condition C requires the carry/borrow bit.  */
static bool cond_need_cb(int c)
{
    switch (c) {
    case 4:
    case 5:
        return true;
    default:
        return false;
    }
}
835 
836 /*
837  * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
838  * the Parisc 1.1 Architecture Reference Manual for details.
839  */
840 
/* Build the DisasCond for arithmetic condition field CF, given the
   result RES, the carry-out msb CB_MSB, and the signed overflow SV.
   See Page 5-3, Table 5-1 of the PA-RISC 1.1 ARM.  The low bit of CF
   inverts the sense of the condition.  */
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        /* res ^ sv has its sign bit set exactly when N ^ V.  */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        /* -cb_msb & res is nonzero iff C && !Z; test for zero.  */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        /* Test the low bit of the result.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
901 
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */
static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    /* For these cases, comparing the operands directly is equivalent to
       testing the subtraction result, and needs neither RES nor SV.  */
    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        /* Remaining conditions need the computed result/overflow.  */
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
936 
937 /*
938  * Similar, but for logicals, where the carry and overflow bits are not
939  * computed, and use of them is undefined.
940  *
941  * Undefined or not, hardware does not trap.  It seems reasonable to
942  * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
943  * how cases c={2,3} are treated.
944  */
945 
946 static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
947 {
948     switch (cf) {
949     case 0:  /* never */
950     case 9:  /* undef, C */
951     case 11: /* undef, C & !Z */
952     case 12: /* undef, V */
953         return cond_make_f();
954 
955     case 1:  /* true */
956     case 8:  /* undef, !C */
957     case 10: /* undef, !C | Z */
958     case 13: /* undef, !V */
959         return cond_make_t();
960 
961     case 2:  /* == */
962         return cond_make_0(TCG_COND_EQ, res);
963     case 3:  /* <> */
964         return cond_make_0(TCG_COND_NE, res);
965     case 4:  /* < */
966         return cond_make_0(TCG_COND_LT, res);
967     case 5:  /* >= */
968         return cond_make_0(TCG_COND_GE, res);
969     case 6:  /* <= */
970         return cond_make_0(TCG_COND_LE, res);
971     case 7:  /* > */
972         return cond_make_0(TCG_COND_GT, res);
973 
974     case 14: /* OD */
975     case 15: /* EV */
976         return do_cond(cf, res, NULL, NULL);
977 
978     default:
979         g_assert_not_reached();
980     }
981 }
982 
983 /* Similar, but for shift/extract/deposit conditions.  */
984 
985 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
986 {
987     unsigned c, f;
988 
989     /* Convert the compressed condition codes to standard.
990        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
991        4-7 are the reverse of 0-3.  */
992     c = orig & 3;
993     if (c == 3) {
994         c = 7;
995     }
996     f = (orig & 4) / 4;
997 
998     return do_log_cond(c * 2 + f, res);
999 }
1000 
1001 /* Similar, but for unit conditions.  */
1002 
static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        /* Per-bit carry-out: cb = ((in1 | in2) & ~res) | (in1 & in2).  */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    /* NOTE(review): the masks below cover only the low 32 bits of the
       result/carries -- confirm TARGET_REGISTER_BITS == 32 here.  */
    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        /* Same trick as above, with 16-bit (halfword) lanes.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        /* Carry out of any 4-bit (digit) lane.  */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        /* Carry out of any byte lane.  */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        /* Carry out of any halfword lane.  */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    /* The low bit of CF negates the condition.  */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
1072 
1073 /* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    /* Overflow iff the operands have the same sign but the result's
       sign differs: sv = (res ^ in1) & ~(in1 ^ in2); callers test the
       sign bit of SV.  */
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}
1086 
1087 /* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    /* Overflow iff the operands have different signs and the result's
       sign differs from IN1: sv = (res ^ in1) & (in1 ^ in2); callers
       test the sign bit of SV.  */
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}
1100 
/*
 * Emit the body of an add-family instruction:
 *   RT = (IN1 << SHIFT) + IN2 [+ PSW[CB] if IS_C],
 * where IS_L suppresses the carry-bit writeback, IS_TSV traps on signed
 * overflow, IS_TC traps on the condition, and CF selects the
 * nullification condition.
 */
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        /* Pre-shift IN1 (SHxADD forms).  */
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        /* Compute the carry-out of the addition via a double-word add.  */
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            /* Recover the per-bit carries: cb = in1 ^ in2 ^ dest.  */
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        /* No carry needed; a plain add suffices.  */
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1167 
1168 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1169                        bool is_l, bool is_tsv, bool is_tc, bool is_c)
1170 {
1171     TCGv_reg tcg_r1, tcg_r2;
1172 
1173     if (a->cf) {
1174         nullify_over(ctx);
1175     }
1176     tcg_r1 = load_gpr(ctx, a->r1);
1177     tcg_r2 = load_gpr(ctx, a->r2);
1178     do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1179     return nullify_end(ctx);
1180 }
1181 
1182 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1183                        bool is_tsv, bool is_tc)
1184 {
1185     TCGv_reg tcg_im, tcg_r2;
1186 
1187     if (a->cf) {
1188         nullify_over(ctx);
1189     }
1190     tcg_im = load_const(ctx, a->i);
1191     tcg_r2 = load_gpr(ctx, a->r);
1192     do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1193     return nullify_end(ctx);
1194 }
1195 
/*
 * Emit the body of a subtract-family instruction:
 *   RT = IN1 - IN2 [- !PSW[CB] if IS_B],
 * where IS_TSV traps on signed overflow, IS_TC traps on the condition,
 * and CF selects the nullification condition.
 */
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        /* Per-bit carries: cb = ~in2 ^ in1 ^ dest.  */
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        /* Per-bit carries: cb = ~(in1 ^ in2) ^ dest.  */
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1257 
1258 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1259                        bool is_tsv, bool is_b, bool is_tc)
1260 {
1261     TCGv_reg tcg_r1, tcg_r2;
1262 
1263     if (a->cf) {
1264         nullify_over(ctx);
1265     }
1266     tcg_r1 = load_gpr(ctx, a->r1);
1267     tcg_r2 = load_gpr(ctx, a->r2);
1268     do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1269     return nullify_end(ctx);
1270 }
1271 
1272 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1273 {
1274     TCGv_reg tcg_im, tcg_r2;
1275 
1276     if (a->cf) {
1277         nullify_over(ctx);
1278     }
1279     tcg_im = load_const(ctx, a->i);
1280     tcg_r2 = load_gpr(ctx, a->r);
1281     do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1282     return nullify_end(ctx);
1283 }
1284 
1285 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1286                       TCGv_reg in2, unsigned cf)
1287 {
1288     TCGv_reg dest, sv;
1289     DisasCond cond;
1290 
1291     dest = tcg_temp_new();
1292     tcg_gen_sub_reg(dest, in1, in2);
1293 
1294     /* Compute signed overflow if required.  */
1295     sv = NULL;
1296     if (cond_need_sv(cf >> 1)) {
1297         sv = do_sub_sv(ctx, dest, in1, in2);
1298     }
1299 
1300     /* Form the condition for the compare.  */
1301     cond = do_sub_cond(cf, dest, in1, in2, sv);
1302 
1303     /* Clear.  */
1304     tcg_gen_movi_reg(dest, 0);
1305     save_gpr(ctx, rt, dest);
1306 
1307     /* Install the new nullification.  */
1308     cond_free(&ctx->null_cond);
1309     ctx->null_cond = cond;
1310 }
1311 
1312 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1313                    TCGv_reg in2, unsigned cf,
1314                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1315 {
1316     TCGv_reg dest = dest_gpr(ctx, rt);
1317 
1318     /* Perform the operation, and writeback.  */
1319     fn(dest, in1, in2);
1320     save_gpr(ctx, rt, dest);
1321 
1322     /* Install the new nullification.  */
1323     cond_free(&ctx->null_cond);
1324     if (cf) {
1325         ctx->null_cond = do_log_cond(cf, dest);
1326     }
1327 }
1328 
1329 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1330                        void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1331 {
1332     TCGv_reg tcg_r1, tcg_r2;
1333 
1334     if (a->cf) {
1335         nullify_over(ctx);
1336     }
1337     tcg_r1 = load_gpr(ctx, a->r1);
1338     tcg_r2 = load_gpr(ctx, a->r2);
1339     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1340     return nullify_end(ctx);
1341 }
1342 
1343 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1344                     TCGv_reg in2, unsigned cf, bool is_tc,
1345                     void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1346 {
1347     TCGv_reg dest;
1348     DisasCond cond;
1349 
1350     if (cf == 0) {
1351         dest = dest_gpr(ctx, rt);
1352         fn(dest, in1, in2);
1353         save_gpr(ctx, rt, dest);
1354         cond_free(&ctx->null_cond);
1355     } else {
1356         dest = tcg_temp_new();
1357         fn(dest, in1, in2);
1358 
1359         cond = do_unit_cond(cf, dest, in1, in2);
1360 
1361         if (is_tc) {
1362             TCGv_reg tmp = tcg_temp_new();
1363             tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
1364             gen_helper_tcond(cpu_env, tmp);
1365         }
1366         save_gpr(ctx, rt, dest);
1367 
1368         cond_free(&ctx->null_cond);
1369         ctx->null_cond = cond;
1370     }
1371 }
1372 
1373 #ifndef CONFIG_USER_ONLY
1374 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1375    from the top 2 bits of the base register.  There are a few system
1376    instructions that have a 3-bit space specifier, for which SR0 is
1377    not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        /* Explicit space register (negative SP encodes a raw 3-bit
           specifier; see the comment above the function).  */
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    /* If the TB was built knowing SR4..SR7 are identical, any of them
       (cached in cpu_srH) can stand in for the selected space.  */
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    /* SP == 0 selects SR4..SR7 from the top 2 bits of BASE: scale those
       bits to a byte offset (0, 8, 16, 24) into the i64 sr[] array.  */
    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    /* Load env->sr[4 + (base >> 30)].  */
    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
1409 #endif
1410 
/*
 * Form the effective address for a memory access.  Returns the register
 * offset (base plus scaled index RX, or base plus displacement DISP) in
 * *POFS, and the full address in *PGVA.  MODIFY: < 0 (pre-modify) and 0
 * use the computed offset as the address; > 0 (post-modify) uses the
 * original base.  IS_PHYS skips the space-register lookup.
 */
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    /* With PSW_W clear, mask the offset before merging the space.  */
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
1445 
1446 /* Emit a memory load.  The modify parameter should be
1447  * < 0 for pre-modify,
1448  * > 0 for post-modify,
1449  * = 0 for no base register update.
1450  */
1451 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1452                        unsigned rx, int scale, target_sreg disp,
1453                        unsigned sp, int modify, MemOp mop)
1454 {
1455     TCGv_reg ofs;
1456     TCGv_tl addr;
1457 
1458     /* Caller uses nullify_over/nullify_end.  */
1459     assert(ctx->null_cond.c == TCG_COND_NEVER);
1460 
1461     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1462              ctx->mmu_idx == MMU_PHYS_IDX);
1463     tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1464     if (modify) {
1465         save_gpr(ctx, rb, ofs);
1466     }
1467 }
1468 
1469 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1470                        unsigned rx, int scale, target_sreg disp,
1471                        unsigned sp, int modify, MemOp mop)
1472 {
1473     TCGv_reg ofs;
1474     TCGv_tl addr;
1475 
1476     /* Caller uses nullify_over/nullify_end.  */
1477     assert(ctx->null_cond.c == TCG_COND_NEVER);
1478 
1479     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1480              ctx->mmu_idx == MMU_PHYS_IDX);
1481     tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1482     if (modify) {
1483         save_gpr(ctx, rb, ofs);
1484     }
1485 }
1486 
1487 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1488                         unsigned rx, int scale, target_sreg disp,
1489                         unsigned sp, int modify, MemOp mop)
1490 {
1491     TCGv_reg ofs;
1492     TCGv_tl addr;
1493 
1494     /* Caller uses nullify_over/nullify_end.  */
1495     assert(ctx->null_cond.c == TCG_COND_NEVER);
1496 
1497     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1498              ctx->mmu_idx == MMU_PHYS_IDX);
1499     tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1500     if (modify) {
1501         save_gpr(ctx, rb, ofs);
1502     }
1503 }
1504 
1505 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1506                         unsigned rx, int scale, target_sreg disp,
1507                         unsigned sp, int modify, MemOp mop)
1508 {
1509     TCGv_reg ofs;
1510     TCGv_tl addr;
1511 
1512     /* Caller uses nullify_over/nullify_end.  */
1513     assert(ctx->null_cond.c == TCG_COND_NEVER);
1514 
1515     form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1516              ctx->mmu_idx == MMU_PHYS_IDX);
1517     tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1518     if (modify) {
1519         save_gpr(ctx, rb, ofs);
1520     }
1521 }
1522 
1523 #if TARGET_REGISTER_BITS == 64
1524 #define do_load_reg   do_load_64
1525 #define do_store_reg  do_store_64
1526 #else
1527 #define do_load_reg   do_load_32
1528 #define do_store_reg  do_store_32
1529 #endif
1530 
1531 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1532                     unsigned rx, int scale, target_sreg disp,
1533                     unsigned sp, int modify, MemOp mop)
1534 {
1535     TCGv_reg dest;
1536 
1537     nullify_over(ctx);
1538 
1539     if (modify == 0) {
1540         /* No base register update.  */
1541         dest = dest_gpr(ctx, rt);
1542     } else {
1543         /* Make sure if RT == RB, we see the result of the load.  */
1544         dest = get_temp(ctx);
1545     }
1546     do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1547     save_gpr(ctx, rt, dest);
1548 
1549     return nullify_end(ctx);
1550 }
1551 
1552 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1553                       unsigned rx, int scale, target_sreg disp,
1554                       unsigned sp, int modify)
1555 {
1556     TCGv_i32 tmp;
1557 
1558     nullify_over(ctx);
1559 
1560     tmp = tcg_temp_new_i32();
1561     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1562     save_frw_i32(rt, tmp);
1563 
1564     if (rt == 0) {
1565         gen_helper_loaded_fr0(cpu_env);
1566     }
1567 
1568     return nullify_end(ctx);
1569 }
1570 
1571 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1572 {
1573     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1574                      a->disp, a->sp, a->m);
1575 }
1576 
1577 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1578                       unsigned rx, int scale, target_sreg disp,
1579                       unsigned sp, int modify)
1580 {
1581     TCGv_i64 tmp;
1582 
1583     nullify_over(ctx);
1584 
1585     tmp = tcg_temp_new_i64();
1586     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1587     save_frd(rt, tmp);
1588 
1589     if (rt == 0) {
1590         gen_helper_loaded_fr0(cpu_env);
1591     }
1592 
1593     return nullify_end(ctx);
1594 }
1595 
1596 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1597 {
1598     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1599                      a->disp, a->sp, a->m);
1600 }
1601 
1602 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1603                      target_sreg disp, unsigned sp,
1604                      int modify, MemOp mop)
1605 {
1606     nullify_over(ctx);
1607     do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1608     return nullify_end(ctx);
1609 }
1610 
1611 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1612                        unsigned rx, int scale, target_sreg disp,
1613                        unsigned sp, int modify)
1614 {
1615     TCGv_i32 tmp;
1616 
1617     nullify_over(ctx);
1618 
1619     tmp = load_frw_i32(rt);
1620     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1621 
1622     return nullify_end(ctx);
1623 }
1624 
1625 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1626 {
1627     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1628                       a->disp, a->sp, a->m);
1629 }
1630 
1631 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1632                        unsigned rx, int scale, target_sreg disp,
1633                        unsigned sp, int modify)
1634 {
1635     TCGv_i64 tmp;
1636 
1637     nullify_over(ctx);
1638 
1639     tmp = load_frd(rt);
1640     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1641 
1642     return nullify_end(ctx);
1643 }
1644 
1645 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1646 {
1647     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1648                       a->disp, a->sp, a->m);
1649 }
1650 
1651 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1652                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1653 {
1654     TCGv_i32 tmp;
1655 
1656     nullify_over(ctx);
1657     tmp = load_frw0_i32(ra);
1658 
1659     func(tmp, cpu_env, tmp);
1660 
1661     save_frw_i32(rt, tmp);
1662     return nullify_end(ctx);
1663 }
1664 
1665 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1666                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1667 {
1668     TCGv_i32 dst;
1669     TCGv_i64 src;
1670 
1671     nullify_over(ctx);
1672     src = load_frd(ra);
1673     dst = tcg_temp_new_i32();
1674 
1675     func(dst, cpu_env, src);
1676 
1677     save_frw_i32(rt, dst);
1678     return nullify_end(ctx);
1679 }
1680 
1681 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1682                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1683 {
1684     TCGv_i64 tmp;
1685 
1686     nullify_over(ctx);
1687     tmp = load_frd0(ra);
1688 
1689     func(tmp, cpu_env, tmp);
1690 
1691     save_frd(rt, tmp);
1692     return nullify_end(ctx);
1693 }
1694 
1695 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1696                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1697 {
1698     TCGv_i32 src;
1699     TCGv_i64 dst;
1700 
1701     nullify_over(ctx);
1702     src = load_frw0_i32(ra);
1703     dst = tcg_temp_new_i64();
1704 
1705     func(dst, cpu_env, src);
1706 
1707     save_frd(rt, dst);
1708     return nullify_end(ctx);
1709 }
1710 
1711 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1712                         unsigned ra, unsigned rb,
1713                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1714 {
1715     TCGv_i32 a, b;
1716 
1717     nullify_over(ctx);
1718     a = load_frw0_i32(ra);
1719     b = load_frw0_i32(rb);
1720 
1721     func(a, cpu_env, a, b);
1722 
1723     save_frw_i32(rt, a);
1724     return nullify_end(ctx);
1725 }
1726 
1727 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1728                         unsigned ra, unsigned rb,
1729                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1730 {
1731     TCGv_i64 a, b;
1732 
1733     nullify_over(ctx);
1734     a = load_frd0(ra);
1735     b = load_frd0(rb);
1736 
1737     func(a, cpu_env, a, b);
1738 
1739     save_frd(rt, a);
1740     return nullify_end(ctx);
1741 }
1742 
1743 /* Emit an unconditional branch to a direct target, which may or may not
1744    have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No nullification pending: just redirect the queue and let
           translation continue at DEST.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            /* Nullify the delay-slot insn unconditionally.  */
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        /* Nullification pending: emit both the taken and the
           nullified exit as separate goto_tb paths.  */
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            /* Skip the nullified delay slot entirely.  */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        /* The branch itself was nullified: fall through.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1779 
1780 /* Emit a conditional branch to a direct target.  If the branch itself
1781    is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    /* The branch insn itself must not have been nullified.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        /* Skip the nullified delay slot and continue past it.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1845 
1846 /* Emit an unconditional branch to an indirect target.  This handles
1847    nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional indirect branch.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the nullified delay slot: jump straight to DEST.  */
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        /* The next insn address is now only known dynamically.  */
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        /* Conditional indirect branch: select between DEST and the
           sequential successor with movcond, no control flow needed.  */
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* Only update the link register when the branch is taken.  */
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
1927 
1928 /* Implement
1929  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1930  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1931  *    else
1932  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1933  * which keeps the privilege level from being increased.
1934  */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        /* Substitute the current privilege into the low two bits of
           the target, then take whichever of {substituted, original}
           is numerically larger -- i.e. the lower privilege level --
           so the branch can never raise privilege.  */
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
1956 
1957 #ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the word-aligned gateway-page entry point.  */
    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        /* Store the new TP in cr27 and return to %r31, with the low
           bits forced to privilege level 3.  */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
2020 #endif
2021 
/* NOP: no operation; only consumes any pending nullification condition.  */
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}
2027 
/* BREAK: raise a break-instruction trap, recording the insn in IIR.  */
static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}
2032 
/* SYNC/SYNCDMA: emit a full sequentially-consistent memory barrier.  */
static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}
2041 
2042 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2043 {
2044     unsigned rt = a->t;
2045     TCGv_reg tmp = dest_gpr(ctx, rt);
2046     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2047     save_gpr(ctx, rt, tmp);
2048 
2049     cond_free(&ctx->null_cond);
2050     return true;
2051 }
2052 
/* MFSP: move space register 'sp' to GR[t].  */
static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    /* Space registers are kept shifted into the high half of an i64
       (see trans_mtsp); shift the space id back down and truncate.  */
    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);

    cond_free(&ctx->null_cond);
    return true;
}
2069 
/* MFCTL: move control register 'r' to GR[t].  SAR and CR26/CR27 are
   readable at any privilege level; all others require privilege 0.  */
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            /* With icount, reading the timer is an I/O operation; the
               TB must end afterwards so the virtual clock stays exact.  */
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    /* Generic case: load the register image straight from env.  */
    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}
2119 
/* MTSP: move GR[r] to space register 'sp'.  SR5-SR7 are privileged.  */
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    /* Space registers are stored with the space id in the high half
       of an i64 (see trans_mfsp/trans_ldsid for the reverse).  */
    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        /* SR4-SR7 live only in env; also invalidate the "all of
           SR4-SR7 identical" fast-path flag recorded for the TB.  */
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }

    return nullify_end(ctx);
}
2144 
2145 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2146 {
2147     unsigned ctl = a->t;
2148     TCGv_reg reg;
2149     TCGv_reg tmp;
2150 
2151     if (ctl == CR_SAR) {
2152         reg = load_gpr(ctx, a->r);
2153         tmp = tcg_temp_new();
2154         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2155         save_or_nullify(ctx, cpu_sar, tmp);
2156 
2157         cond_free(&ctx->null_cond);
2158         return true;
2159     }
2160 
2161     /* All other control registers are privileged or read-only.  */
2162     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2163 
2164 #ifndef CONFIG_USER_ONLY
2165     nullify_over(ctx);
2166     reg = load_gpr(ctx, a->r);
2167 
2168     switch (ctl) {
2169     case CR_IT:
2170         gen_helper_write_interval_timer(cpu_env, reg);
2171         break;
2172     case CR_EIRR:
2173         gen_helper_write_eirr(cpu_env, reg);
2174         break;
2175     case CR_EIEM:
2176         gen_helper_write_eiem(cpu_env, reg);
2177         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2178         break;
2179 
2180     case CR_IIASQ:
2181     case CR_IIAOQ:
2182         /* FIXME: Respect PSW_Q bit */
2183         /* The write advances the queue and stores to the back element.  */
2184         tmp = get_temp(ctx);
2185         tcg_gen_ld_reg(tmp, cpu_env,
2186                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2187         tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2188         tcg_gen_st_reg(reg, cpu_env,
2189                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2190         break;
2191 
2192     case CR_PID1:
2193     case CR_PID2:
2194     case CR_PID3:
2195     case CR_PID4:
2196         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2197 #ifndef CONFIG_USER_ONLY
2198         gen_helper_change_prot_id(cpu_env);
2199 #endif
2200         break;
2201 
2202     default:
2203         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2204         break;
2205     }
2206     return nullify_end(ctx);
2207 #endif
2208 }
2209 
/* MTSARCM: write the one's complement of GR[r] to the shift amount
   register, keeping only the bits that index a register width.  */
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);

    cond_free(&ctx->null_cond);
    return true;
}
2221 
/* LDSID: load the space identifier selected by (sp, GR[b]) into GR[t].  */
static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_reg dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    /* The space id lives in the high half of the i64 space register;
       shift it down and truncate to register width.  */
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}
2241 
/* RSM: reset (clear) the PSW system-mask bits named in the immediate,
   saving the helper's result in GR[t].  Privileged.  */
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    /* Compute psw & ~i and swap it into the system mask.  */
    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2261 
/* SSM: set the PSW system-mask bits named in the immediate, saving
   the helper's result in GR[t].  Privileged.  */
static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    /* Compute psw | i and swap it into the system mask.  */
    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2281 
/* MTSM: replace the PSW system mask with GR[r].  The helper's output
   (written into tmp) is not stored anywhere -- MTSM has no target
   register.  Privileged.  */
static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2298 
/* Common implementation of RFI and RFI,R: dispatch to the appropriate
   helper and hard-exit the TB.  Privileged.  */
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}
2317 
/* RFI: return from interruption.  */
static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

/* RFI,R: return from interruption, restoring shadow registers.  */
static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}
2327 
/* QEMU-specific: halt the machine (firmware/diagnostic use).  Privileged.  */
static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_halt(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2338 
/* QEMU-specific: reset the machine.  Privileged.  */
static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_reset(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2349 
/* Copy the shadow registers back into the general registers.  Privileged.  */
static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_getshadowregs(cpu_env);
    return nullify_end(ctx);
#endif
}
2359 
2360 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2361 {
2362     if (a->m) {
2363         TCGv_reg dest = dest_gpr(ctx, a->b);
2364         TCGv_reg src1 = load_gpr(ctx, a->b);
2365         TCGv_reg src2 = load_gpr(ctx, a->x);
2366 
2367         /* The only thing we need to do is the base register modification.  */
2368         tcg_gen_add_reg(dest, src1, src2);
2369         save_gpr(ctx, a->b, dest);
2370     }
2371     cond_free(&ctx->null_cond);
2372     return true;
2373 }
2374 
/* PROBE/PROBEI: test read or write access to a virtual address at a
   given privilege level, storing the helper's result in GR[t].  */
static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    /* The privilege level to probe comes from the immediate (PROBEI)
       or from the low two bits of GR[ri] (PROBE).  */
    if (a->imm) {
        level = tcg_constant_i32(a->ri);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, cpu_env, addr, level, want);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}
2400 
/* IITLBA/IITLBP/IDTLBA/IDTLBP: insert a TLB entry, writing either the
   address portion (a->addr) or the protection portion.  Privileged.  */
static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2425 
/* PITLB/PDTLB/PITLBE/PDTLBE: purge one TLB entry, or the entire TLB
   for the "local/entire" form.  Privileged.  */
static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
    if (a->m) {
        /* Base register modification.  */
        save_gpr(ctx, a->b, ofs);
    }
    if (a->local) {
        gen_helper_ptlbe(cpu_env);
    } else {
        gen_helper_ptlb(cpu_env, addr);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2452 
2453 /*
2454  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2455  * See
2456  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2457  *     page 13-9 (195/206)
2458  */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr, atl, stl;
    TCGv_reg reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *    return gen_illegal(ctx);
     *
     * Note for future: these are 32-bit systems; no hppa64.
     */

    atl = tcg_temp_new_tl();
    stl = tcg_temp_new_tl();
    addr = tcg_temp_new_tl();

    /* The virtual address to insert comes from the interruption
       registers: ISR/IOR for data, IIASQ/IIAOQ for instruction.
       Combine space (high 32 bits) and offset into one value.  */
    tcg_gen_ld32u_i64(stl, cpu_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, cpu_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_tl(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2503 
/* LPA: translate a virtual address and store the physical address in
   GR[t].  Privileged.  */
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl vaddr;
    TCGv_reg ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new();
    gen_helper_lpa(paddr, cpu_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}
2527 
/* LCI: Load Coherence Index.  Privileged.  */
static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, tcg_constant_reg(0));

    cond_free(&ctx->null_cond);
    return true;
}
2541 
/*
 * Register-form ADD and SUB.  Each wrapper selects completer flags
 * for the shared do_add_reg/do_sub_reg implementations; the function
 * suffix names the completer (l = logical, tsv = trap on signed
 * overflow, c = with carry, b = with borrow, tc = trap on condition).
 */
static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}
2596 
/* ANDCM (and-complement) and AND; condition handling in do_log_reg.  */
static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_reg);
}

static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_reg);
}
2606 
/* OR, including its pseudo-ops: NOP (rt==0), COPY (r2==0), and the
   QEMU idle-loop extension below.  */
static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            cond_free(&ctx->null_cond);
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_reg dest = dest_gpr(ctx, rt);
                tcg_gen_movi_reg(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            cond_free(&ctx->null_cond);
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */
            nullify_over(ctx);

            /* Advance the instruction queue.  */
            copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_reg);
}
2658 
/* XOR register form; condition handling in do_log_reg.  */
static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_reg);
}
2663 
/* CMPCLR: compare GR[r1] with GR[r2] and clear GR[t]; a nonzero cf
   requests nullification based on the comparison.  */
static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
    return nullify_end(ctx);
}
2676 
/* UXOR: unit XOR -- uses the unit-condition path (do_unit) rather than
   the ordinary logical-condition path.  */
static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
    return nullify_end(ctx);
}
2689 
/* Common code for UADDCM[,TC]: unit add complement, computed as
   r1 + ~r2, optionally trapping on the unit condition.  */
static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2, tmp;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = get_temp(ctx);
    tcg_gen_not_reg(tmp, tcg_r2);
    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx);
}
2704 
/* UADDCM and UADDCM,TC wrappers around do_uaddcm.  */
static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, true);
}
2714 
/* Common code for DCOR/IDCOR (decimal correct).  Build a per-nibble
   correction mask from the saved carry bits and add or subtract 6 in
   each BCD digit that needs adjusting.  */
static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
{
    TCGv_reg tmp;

    nullify_over(ctx);

    /* Extract one carry bit per nibble from PSW[CB].  */
    tmp = get_temp(ctx);
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
            is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
    return nullify_end(ctx);
}
2732 
/* DCOR and DCOR,I (intermediate) wrappers around do_dcor.  */
static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, true);
}
2742 
/* DS: divide step -- one iteration of the non-restoring division
   primitive, driven by PSW[V] and PSW[CB].  */
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB]: the per-bit carries, recovered by xor of
       the inputs with the result.  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
    }

    return nullify_end(ctx);
}
2796 
/*
 * Immediate-form ADD and SUB.  As with the register forms, the suffix
 * names the completer (tsv = trap on signed overflow, tc = trap on
 * condition) passed to do_add_imm/do_sub_imm.
 */
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}
2826 
/* CMPICLR: immediate form of compare-and-clear.  */
static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);

    return nullify_end(ctx);
}
2841 
/* Indexed/short-displacement loads.  64-bit accesses are illegal on a
   32-bit register machine.  */
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
    }
}
2851 
/* Short-displacement stores; there is no indexed store form.  */
static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
    }
}
2861 
/* LDCW/LDCD: load-and-clear, implemented as an atomic exchange of the
   memory word with zero.  */
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    zero = tcg_constant_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
2901 
/* STBY: store bytes -- partial-word store for unaligned block moves.
   The a->a flag selects the stby_e vs stby_b helper; _parallel
   variants are used when other vCPUs may observe the access.  */
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_reg ofs, val;
    TCGv_tl addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             ctx->mmu_idx == MMU_PHYS_IDX);
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification: the updated address is word-aligned.  */
        tcg_gen_andi_reg(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
2932 
2933 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2934 {
2935     int hold_mmu_idx = ctx->mmu_idx;
2936 
2937     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2938     ctx->mmu_idx = MMU_PHYS_IDX;
2939     trans_ld(ctx, a);
2940     ctx->mmu_idx = hold_mmu_idx;
2941     return true;
2942 }
2943 
2944 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2945 {
2946     int hold_mmu_idx = ctx->mmu_idx;
2947 
2948     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2949     ctx->mmu_idx = MMU_PHYS_IDX;
2950     trans_st(ctx, a);
2951     ctx->mmu_idx = hold_mmu_idx;
2952     return true;
2953 }
2954 
2955 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2956 {
2957     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2958 
2959     tcg_gen_movi_reg(tcg_rt, a->i);
2960     save_gpr(ctx, a->t, tcg_rt);
2961     cond_free(&ctx->null_cond);
2962     return true;
2963 }
2964 
2965 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2966 {
2967     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2968     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2969 
2970     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2971     save_gpr(ctx, 1, tcg_r1);
2972     cond_free(&ctx->null_cond);
2973     return true;
2974 }
2975 
static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    /* LDO: load offset -- address arithmetic only, no memory access.  */
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_reg.  */
    if (a->b == 0) {
        tcg_gen_movi_reg(tcg_rt, a->i);
    } else {
        tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}
2991 
/* Emit IN1 - GR[r] and branch on condition C/F; shared by CMPB (register)
   and CMPIB (immediate) via the IN1 operand.  */
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        /* Compute signed overflow only when the condition needs it.  */
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* F inverts the sense of condition C.  */
    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3011 
/* CMPB: compare GR[r1] with GR[r2] and branch.  */
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}
3017 
/* CMPIB: compare an immediate with GR[r] and branch.  */
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}
3023 
/* Emit GR[r] + IN1, branch on condition C/F, and write the sum back to
   GR[r]; shared by ADDB (register) and ADDIB (immediate) via IN1.  */
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv, cb_msb;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new();
    sv = NULL;
    cb_msb = NULL;

    if (cond_need_cb(c)) {
        /* Capture the carry-out of the addition in cb_msb.  */
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
    } else {
        tcg_gen_add_reg(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        /* Compute signed overflow only when the condition needs it.  */
        sv = do_add_sv(ctx, dest, in1, in2);
    }

    /* F inverts the sense of condition C.  */
    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}
3050 
/* ADDB: add GR[r1] into GR[r2] and branch.  */
static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}
3056 
/* ADDIB: add an immediate into GR[r] and branch.  */
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}
3062 
3063 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3064 {
3065     TCGv_reg tmp, tcg_r;
3066     DisasCond cond;
3067 
3068     nullify_over(ctx);
3069 
3070     tmp = tcg_temp_new();
3071     tcg_r = load_gpr(ctx, a->r);
3072     tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3073 
3074     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3075     return do_cbranch(ctx, a->disp, a->n, &cond);
3076 }
3077 
3078 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3079 {
3080     TCGv_reg tmp, tcg_r;
3081     DisasCond cond;
3082 
3083     nullify_over(ctx);
3084 
3085     tmp = tcg_temp_new();
3086     tcg_r = load_gpr(ctx, a->r);
3087     tcg_gen_shli_reg(tmp, tcg_r, a->p);
3088 
3089     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3090     return do_cbranch(ctx, a->disp, a->n, &cond);
3091 }
3092 
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    /* MOVB: copy GR[r1] into GR[r2] and branch on the moved value.  */
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* GR0 always reads as zero.  */
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
    }

    /* MOVB uses the shift/extract/deposit condition set.  */
    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3110 
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    /* MOVIB: write the immediate to GR[r] and branch on its value.  */
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_reg(dest, a->i);

    /* MOVIB uses the shift/extract/deposit condition set.  */
    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3124 
static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
{
    /* SHRPW, variable shift: shift the word pair r1:r2 right by SAR
       and keep the low 32 bits.  */
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    if (a->r1 == 0) {
        /* High word is zero: a plain unsigned shift of r2.  */
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (a->r1 == a->r2) {
        /* Both halves identical: this is a 32-bit rotate right.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
    } else {
        /* General case: concatenate to 64 bits and shift.  */
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3160 
static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
{
    /* SHRPW, fixed shift: shift the word pair r1:r2 right by a
       compile-time amount and keep the low 32 bits.  */
    unsigned sa = 31 - a->cpos;
    TCGv_reg dest, t2;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* High word is zero: a simple field extract of r2.  */
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else if (TARGET_REGISTER_BITS == 32) {
        /* 32-bit registers: extract2 does the double-shift directly.  */
        tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
    } else if (a->r1 == a->r2) {
        /* Both halves identical: this is a 32-bit rotate right.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
    } else {
        /* General case: concatenate to 64 bits and shift.  */
        TCGv_i64 t64 = tcg_temp_new_i64();
        tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
        tcg_gen_shri_i64(t64, t64, sa);
        tcg_gen_trunc_i64_reg(dest, t64);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3196 
static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
{
    /* EXTRW, variable position: extract a LEN-bit field whose right
       edge is selected by SAR, sign- or zero-extended per a->se.  */
    unsigned len = 32 - a->clen;
    TCGv_reg dest, src, tmp;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (a->se) {
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3228 
static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
{
    /* EXTRW, fixed position: extract a LEN-bit field at a compile-time
       position, sign- or zero-extended per a->se.  */
    unsigned len = 32 - a->clen;
    unsigned cpos = 31 - a->pos;    /* convert big-endian bit number */
    TCGv_reg dest, src;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3255 
static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
{
    /* DEPWI, fixed position: deposit the immediate into a field of
       GR[t].  The whole deposit is folded into translate-time masks.  */
    unsigned len = 32 - a->clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    /* mask0: the deposited bits over zero; mask1: over all-ones.  */
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        /* Merge form: clear the field (if needed) then OR in the bits.  */
        TCGv_reg src = load_gpr(ctx, a->t);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        /* Zeroing form: the result is just the deposited constant.  */
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3292 
static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
{
    /* DEPW, fixed position: deposit the low LEN bits of GR[r] into
       GR[t] at CPOS.  a->nz clear selects the zeroing (,z) form.  */
    unsigned rs = a->nz ? a->t : 0;
    unsigned len = 32 - a->clen;
    TCGv_reg dest, val;

    if (a->c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        /* Zeroing form: deposit into an all-zero background.  */
        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
    } else {
        /* Merge form: deposit into the current value of GR[rs].  */
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3322 
/* Variable-position deposit (DEPW/DEPWI ,sar forms): insert the low
   LEN bits of VAL into GR[rt] at the run-time position given by SAR.
   NZ selects merging with the old GR[rt] versus a zeroed background.  */
static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
                        unsigned nz, unsigned clen, TCGv_reg val)
{
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    /* mask covers the low LEN bits; tmp is VAL truncated to the field.  */
    mask = tcg_temp_new();
    tcg_gen_movi_reg(mask, msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        /* Merge: clear the field in GR[rs], then OR in the new bits.  */
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        /* Zeroing form: the shifted field is the whole result.  */
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx);
}
3358 
/* DEPW, variable position form.  */
static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
}
3366 
/* DEPWI, variable position form.  */
static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
}
3374 
static bool trans_be(DisasContext *ctx, arg_be *a)
{
    /* BE: branch external -- to offset GR[b]+disp in space SR[sp].  */
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
    /* Apply the privilege rules for an indirect branch target.  */
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        /* BE,L: link the return offset in GR31 and the space in SR0.  */
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        /* The delay slot is nullified: install the target directly
           into both halves of the instruction address queue.  */
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        /* Advance the queue: old back becomes the front, and the
           branch target becomes the new back.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
3428 
/* B,L: pc-relative branch, optionally linking into GR[l].  */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}
3433 
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    /* B,GATE: branch into a gateway page, possibly raising privilege.  */
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            /* The new privilege is encoded in the low 2 bits of IAOQ.  */
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        /* Record the current privilege in the low bits of GR[l].
           NOTE(review): the return offset itself is not written here
           (do_dbranch below is called with link 0), and dest_gpr may
           yield a fresh temp while nullified -- verify this against the
           architectural B,GATE linkage semantics.  */
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
3488 
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    /* BLR: branch to PC+8 + GR[x]*8, linking into GR[l].  */
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}
3502 
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    /* BV: branch vectored, to GR[b] + GR[x]*8.  */
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    /* Apply the privilege rules for an indirect branch target.  */
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}
3517 
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    /* BVE: branch vectored external -- indirect branch that may also
       switch the address space (system mode).  */
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    /* Advance the queue: old back becomes the front.  */
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    /* Derive the new space from the branch target (see space_select).  */
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        /* BVE,L: link the return offset in GR[l].  */
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
3544 
3545 /*
3546  * Float class 0
3547  */
3548 
/* FCPY single: plain move; env exists only to match the callback type.  */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
3553 
static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    /* FID: identify the FPU by writing a model/revision word to FR0.  */
    uint64_t ret;

    if (TARGET_REGISTER_BITS == 64) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}
3568 
/* FCPY, single precision.  */
static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}
3573 
/* FCPY double: plain move; env exists only to match the callback type.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}
3578 
/* FCPY, double precision.  */
static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}
3583 
/* FABS single: clear the sign bit (bit 31).  */
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}
3588 
/* FABS, single precision.  */
static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}
3593 
/* FABS double: clear the sign bit (bit 63).  */
static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}
3598 
/* FABS, double precision.  */
static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}
3603 
/* FSQRT, single precision.  */
static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}
3608 
/* FSQRT, double precision.  */
static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}
3613 
/* FRND (round to integer value), single precision.  */
static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}
3618 
/* FRND (round to integer value), double precision.  */
static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}
3623 
/* FNEG single: flip the sign bit (bit 31).  */
static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}
3628 
/* FNEG, single precision.  */
static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}
3633 
/* FNEG double: flip the sign bit (bit 63).  */
static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}
3638 
/* FNEG, double precision.  */
static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}
3643 
/* FNEGABS single: force the sign bit set (negative absolute value).  */
static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}
3648 
/* FNEGABS, single precision.  */
static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}
3653 
/* FNEGABS double: force the sign bit set (negative absolute value).  */
static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}
3658 
/* FNEGABS, double precision.  */
static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
3663 
3664 /*
3665  * Float class 1
3666  */
3667 
/* FCNV: double -> single.  */
static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}
3672 
/* FCNV: single -> double.  */
static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}
3677 
/* FCNV: signed word -> single.  */
static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}
3682 
/* FCNV: signed doubleword -> single.  */
static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}
3687 
/* FCNV: signed word -> double.  */
static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}
3692 
/* FCNV: signed doubleword -> double.  */
static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}
3697 
/* FCNV: single -> signed word.  */
static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}
3702 
/* FCNV: double -> signed word.  */
static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}
3707 
/* FCNV: single -> signed doubleword.  */
static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}
3712 
/* FCNV: double -> signed doubleword.  */
static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}
3717 
/* FCNV,T: single -> signed word, truncating.  */
static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}
3722 
/* FCNV,T: double -> signed word, truncating.  */
static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}
3727 
/* FCNV,T: single -> signed doubleword, truncating.  */
static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}
3732 
/* FCNV,T: double -> signed doubleword, truncating.  */
static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}
3737 
/* FCNV: unsigned word -> single.  */
static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}
3742 
/* FCNV: unsigned doubleword -> single.  */
static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}
3747 
/* FCNV: unsigned word -> double.  */
static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}
3752 
/* FCNV: unsigned doubleword -> double.  */
static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}
3757 
/* FCNV: single -> unsigned word.  */
static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}
3762 
/* FCNV: double -> unsigned word.  */
static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}
3767 
/* FCNV: single -> unsigned doubleword.  */
static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}
3772 
/* FCNV: double -> unsigned doubleword.  */
static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}
3777 
/* FCNV,T: single -> unsigned word, truncating.  */
static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}
3782 
/* FCNV,T: double -> unsigned word, truncating.  */
static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}
3787 
/* FCNV,T: single -> unsigned doubleword, truncating.  */
static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}
3792 
/* FCNV,T: double -> unsigned doubleword, truncating.  */
static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
3797 
3798 /*
3799  * Float class 2
3800  */
3801 
static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    /* FCMP single: compare FR[r1] with FR[r2]; the result is recorded
       by the helper according to the Y and C fields.  */
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
3817 
static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    /* FCMP double: compare FR[r1] with FR[r2]; the result is recorded
       by the helper according to the Y and C fields.  */
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
3834 
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    /* FTEST: nullify the following insn based on the condition bits
       tracked in the FR0 status shadow (fr0_shadow).  */
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        /* Queue tests: C selects which set of queued compare results
           (plus the C bit) participates -- masks per the architecture
           manual; presumably matching the PA-RISC 2.0 FTEST table.  */
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            /* Test only the current C bit.  */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            /* Nullify iff no bits outside MASK are set.  */
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            /* Nullify iff no bits within MASK are set.  */
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        /* Single-bit test: Y selects one condition bit to examine.  */
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}
3896 
3897 /*
3898  * Float class 2
3899  */
3900 
/* FADD, single precision.  */
static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}
3905 
/* FADD, double precision.  */
static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}
3910 
/* FSUB, single precision.  */
static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}
3915 
/* FSUB, double precision.  */
static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}
3920 
/* FMPY, single precision.  */
static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}
3925 
/* FMPY, double precision.  */
static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}
3930 
/* FDIV, single precision.  */
static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}
3935 
/* FDIV, double precision.  */
static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
3940 
static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    /* XMPYU: 32-bit unsigned multiply producing a 64-bit result in the
       floating-point register file.  */
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}
3954 
3955 /* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    /*
     * The low four bits index within a 16-register bank starting at 16;
     * bit 4 of the encoding selects the upper bank, a further 32 higher.
     */
    return 16 + (r & 15) + ((r & 16) << 1);
}
3960 
3961 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3962 {
3963     int tm = fmpyadd_s_reg(a->tm);
3964     int ra = fmpyadd_s_reg(a->ra);
3965     int ta = fmpyadd_s_reg(a->ta);
3966     int rm2 = fmpyadd_s_reg(a->rm2);
3967     int rm1 = fmpyadd_s_reg(a->rm1);
3968 
3969     nullify_over(ctx);
3970 
3971     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3972     do_fop_weww(ctx, ta, ta, ra,
3973                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3974 
3975     return nullify_end(ctx);
3976 }
3977 
/* FMPYADD, single precision.  */
static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}
3982 
/* FMPYSUB, single precision.  */
static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}
3987 
/*
 * FMPYADD/FMPYSUB, double precision: emit the multiply
 * (rm1 * rm2 -> tm) and the add/sub (ta +/- ra -> ta) as two
 * separate double-precision operations.  Unlike the single-precision
 * form, the register numbers are used as encoded.
 */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}
3998 
/* FMPYADD, double precision.  */
static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}
4003 
/* FMPYSUB, double precision.  */
static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
4008 
4009 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4010 {
4011     TCGv_i32 x, y, z;
4012 
4013     nullify_over(ctx);
4014     x = load_frw0_i32(a->rm1);
4015     y = load_frw0_i32(a->rm2);
4016     z = load_frw0_i32(a->ra3);
4017 
4018     if (a->neg) {
4019         gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4020     } else {
4021         gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4022     }
4023 
4024     save_frw_i32(a->t, x);
4025     return nullify_end(ctx);
4026 }
4027 
4028 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4029 {
4030     TCGv_i64 x, y, z;
4031 
4032     nullify_over(ctx);
4033     x = load_frd0(a->rm1);
4034     y = load_frd0(a->rm2);
4035     z = load_frd0(a->ra3);
4036 
4037     if (a->neg) {
4038         gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4039     } else {
4040         gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4041     }
4042 
4043     save_frd(a->t, x);
4044     return nullify_end(ctx);
4045 }
4046 
/*
 * DIAG: implementation-dependent diagnose instruction.  Nothing is
 * implemented for it here; log and treat as a successful no-op.
 */
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
    cond_free(&ctx->null_cond);
    return true;
}
4053 
/*
 * Translator hook: initialize per-TB translation state (privilege,
 * MMU index, instruction-address queue, instruction bound, temps).
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    /* User emulation runs entirely at user privilege; the privilege
       level is also carried in the low bits of the IAOQ values.  */
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    /* Privilege was stashed in the TB flags; use the physical MMU
       index when data translation (PSW_D) is off.  */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    /* Low 32 bits of cs_base hold the signed front-to-back offset;
       zero means the back queue entry is unknown (-1).  */
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    /* No "next" queue entry until the first insn is translated.  */
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    /* Start with empty per-insn temporary tracking.  */
    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}
4092 
4093 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4094 {
4095     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4096 
4097     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4098     ctx->null_cond = cond_make_f();
4099     ctx->psw_n_nonzero = false;
4100     if (ctx->tb_flags & PSW_N) {
4101         ctx->null_cond.c = TCG_COND_ALWAYS;
4102         ctx->psw_n_nonzero = true;
4103     }
4104     ctx->null_lab = NULL;
4105 }
4106 
/*
 * Translator hook: record both IAOQ entries at each insn boundary so
 * the full instruction-address queue can be restored on exception.
 */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
4113 
/*
 * Translator hook: translate a single instruction, maintaining the
 * front/back/next instruction-address queue and the nullification
 * state across the insn boundary.
 */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    /* Page zero holds emulated syscall/gateway entry points.  */
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            /* Back entry is only known at runtime; compute next = back + 4
               as a TCG value.  */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Unconditionally nullified: skip decode entirely and clear
               the nullification for the following insn.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            /* Any null label must have been resolved within the insn.  */
            assert(ctx->null_lab == NULL);
        }
    }

    /* Forget any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Known taken/not-taken branch to a linkable target:
               chain directly to the next TB.  */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    /* Shift the queue: back becomes front, next becomes back.  */
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            /* Front entry now runtime-only: commit the queue and
               nullification state to the CPU globals and end the TB.  */
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            /* Only the back entry is runtime-only; keep translating.  */
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
4213 
/*
 * Translator hook: end of TB.  Flush any stale instruction-queue and
 * nullification state to the CPU globals, then emit the TB exit
 * appropriate for the reason translation stopped.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        /* The TB already ended with its own exit.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* Queue state in ctx is newer than the globals: write it back.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            /* Try to chain to the TB at the (runtime) next address.  */
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        /* Return to the main loop, e.g. to recognize interrupts.  */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
4242 
4243 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4244                               CPUState *cs, FILE *logfile)
4245 {
4246     target_ulong pc = dcbase->pc_first;
4247 
4248 #ifdef CONFIG_USER_ONLY
4249     switch (pc) {
4250     case 0x00:
4251         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4252         return;
4253     case 0xb0:
4254         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4255         return;
4256     case 0xe0:
4257         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4258         return;
4259     case 0x100:
4260         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4261         return;
4262     }
4263 #endif
4264 
4265     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4266     target_disas(logfile, cs, pc, dcbase->tb->size);
4267 }
4268 
/* Hooks consumed by the generic translator_loop.  */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
4277 
/*
 * Entry point for TB translation: run the generic translator loop
 * with the HPPA-specific hooks over a stack-allocated context.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}
4284