/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* The space register field is stored inverted, so that a decoded value of
   0 selects sr0 explicitly rather than a space inferred from the base.  */
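/* E.g. an encoded field of 0 becomes ~0 = -1; space_select() below sees
   sp < 0, undoes the inversion, and loads sr0.  */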
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
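/* E.g. val = 2 (m=1, a=0) yields 1 (post-modify), val = 3 (m=1, a=1)
   yields -1 (pre-modify), and m = 0 yields 0 (no base update).  */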
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

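/* The 64-bit fr[] registers are accessed as pairs of 32-bit halves:
   bit 5 of rt (rt & 32) selects the half, and HI_OFS/LO_OFS above
   account for host endianness.  */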
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute the conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */
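/* E.g. cf = 2 (c = 1, f = 0) selects the "res == 0" test, while cf = 3
   sets the f bit, which inverts the condition to "res != 0".  */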

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,=,<), while 3 is OD.
       4-7 are the reverse of 0-3.  */
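    /* E.g. orig = 1 maps to do_log_cond(2), i.e. "==", and orig = 5 is
       its reverse, do_log_cond(3), i.e. "<>".  */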
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
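        /* cb = (in1 & in2) | ((in1 | in2) & ~res), the carry-out of
           each bit of the addition.  */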
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
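        /* tmp now has 0x80 set in each byte of res that is zero.  */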
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
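    /* sv = (res ^ in1) & ~(in1 ^ in2); its sign bit is set iff the
       signed addition overflowed.  */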

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
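    /* sv = (res ^ in1) & (in1 ^ in2); its sign bit is set iff the
       signed subtraction overflowed.  */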

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
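            /* cb = in1 ^ in2 ^ dest, the carry-in of each bit.  */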
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
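        /* cb = ~(in1 ^ in2) ^ dest = in1 ^ ~in2 ^ dest, the carry-in of
           each bit of IN1 + ~IN2 + 1.  */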
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
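    /* tmp = (top two bits of base) * sizeof(uint64_t), a byte offset
       selecting one of sr[4-7] below.  */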
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1807     } else {
1808         if (!n && ctx->null_lab) {
1809             gen_set_label(ctx->null_lab);
1810             ctx->null_lab = NULL;
1811         }
1812         nullify_set(ctx, n);
1813         if (ctx->iaoq_n == -1) {
1814             /* The temporary iaoq_n_var died at the branch above.
1815                Regenerate it here instead of saving it.  */
1816             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1817         }
1818         gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1819     }
1820 
1821     gen_set_label(taken);
1822 
1823     /* Taken: Condition satisfied; nullify on forward branches.  */
1824     n = is_n && disp >= 0;
1825     if (n && use_nullify_skip(ctx)) {
1826         nullify_set(ctx, 0);
1827         gen_goto_tb(ctx, 1, dest, dest + 4);
1828     } else {
1829         nullify_set(ctx, n);
1830         gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1831     }
1832 
1833     /* Not taken: the branch itself was nullified.  */
1834     if (ctx->null_lab) {
1835         gen_set_label(ctx->null_lab);
1836         ctx->null_lab = NULL;
1837         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1838     } else {
1839         ctx->base.is_jmp = DISAS_NORETURN;
1840     }
1841     return true;
1842 }
1843 
1844 /* Emit an unconditional branch to an indirect target.  This handles
1845    nullification of the branch itself.  */
1846 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
1847                        unsigned link, bool is_n)
1848 {
1849     TCGv_reg a0, a1, next, tmp;
1850     TCGCond c;
1851 
1852     assert(ctx->null_lab == NULL);
1853 
1854     if (ctx->null_cond.c == TCG_COND_NEVER) {
1855         if (link != 0) {
1856             copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1857         }
1858         next = get_temp(ctx);
1859         tcg_gen_mov_reg(next, dest);
1860         if (is_n) {
1861             if (use_nullify_skip(ctx)) {
1862                 tcg_gen_mov_reg(cpu_iaoq_f, next);
1863                 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1864                 nullify_set(ctx, 0);
1865                 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1866                 return true;
1867             }
1868             ctx->null_cond.c = TCG_COND_ALWAYS;
1869         }
1870         ctx->iaoq_n = -1;
1871         ctx->iaoq_n_var = next;
1872     } else if (is_n && use_nullify_skip(ctx)) {
1873         /* The (conditional) branch, B, nullifies the next insn, N,
1874            and we're allowed to skip execution of N (no single-step or
1875            tracepoint in effect).  Since the goto_ptr that we must use
1876            for the indirect branch consumes no special resources, we
1877            can (conditionally) skip B and continue execution.  */
1878         /* The use_nullify_skip test implies we have a known control path.  */
1879         tcg_debug_assert(ctx->iaoq_b != -1);
1880         tcg_debug_assert(ctx->iaoq_n != -1);
1881 
1882         /* We do have to handle the non-local temporary, DEST, before
1883            branching.  Since IAOQ_F is not really live at this point, we
1884            can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1885         tcg_gen_mov_reg(cpu_iaoq_f, dest);
1886         tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
1887 
1888         nullify_over(ctx);
1889         if (link != 0) {
1890             tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
1891         }
1892         tcg_gen_lookup_and_goto_ptr();
1893         return nullify_end(ctx);
1894     } else {
1895         c = ctx->null_cond.c;
1896         a0 = ctx->null_cond.a0;
1897         a1 = ctx->null_cond.a1;
1898 
1899         tmp = tcg_temp_new();
1900         next = get_temp(ctx);
1901 
1902         copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
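        /* If the branch itself is nullified (condition C holds), NEXT
           falls through to the queued address in TMP; otherwise NEXT
           takes the branch target DEST.  */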
1903         tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
1904         ctx->iaoq_n = -1;
1905         ctx->iaoq_n_var = next;
1906 
1907         if (link != 0) {
1908             tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1909         }
1910 
1911         if (is_n) {
1912             /* The branch nullifies the next insn, which means the state of N
1913                after the branch is the inverse of the state of N that applied
1914                to the branch.  */
1915             tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1916             cond_free(&ctx->null_cond);
1917             ctx->null_cond = cond_make_n();
1918             ctx->psw_n_nonzero = true;
1919         } else {
1920             cond_free(&ctx->null_cond);
1921         }
1922     }
1923     return true;
1924 }
1925 
1926 /* Implement
1927  *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1928  *      IAOQ_Next{30..31} ← GR[b]{30..31};
1929  *    else
1930  *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1931  * which keeps the privilege level from being increased.
1932  */
1933 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1934 {
1935     TCGv_reg dest;
1936     switch (ctx->privilege) {
1937     case 0:
1938         /* Privilege 0 is maximum and is allowed to decrease.  */
1939         return offset;
1940     case 3:
1941         /* Privilege 3 is minimum and is never allowed to increase.  */
1942         dest = get_temp(ctx);
1943         tcg_gen_ori_reg(dest, offset, 3);
1944         break;
1945     default:
1946         dest = get_temp(ctx);
1947         tcg_gen_andi_reg(dest, offset, -4);
1948         tcg_gen_ori_reg(dest, dest, ctx->privilege);
1949         tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
1950         break;
1951     }
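    /* For example: with ctx->privilege == 1, a target whose low two bits
       are 0 would raise privilege, so the computed (offset & -4) | 1
       compares unsigned-greater and is chosen; low bits of 2 or 3 make
       OFFSET itself compare greater, and it is used unchanged, since
       lowering privilege is always allowed.  */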
1952     return dest;
1953 }
1954 
1955 #ifdef CONFIG_USER_ONLY
1956 /* On Linux, page zero is normally marked execute only + gateway.
1957    Therefore normal read or write is supposed to fail, but specific
1958    offsets have kernel code mapped to raise permissions to implement
1959    system calls.  Handling this via an explicit check here, rather
1960    system calls.  Handling this via an explicit check here, rather
1961    than in the "be disp(sr2,r0)" instruction that probably sent us
1962    aforementioned BE.  */
1963 static void do_page_zero(DisasContext *ctx)
1964 {
1965     /* If by some means we get here with PSW[N]=1, that implies that
1966        the B,GATE instruction would be skipped, and we'd fault on the
1967        next insn within the privileged page.  */
1968     switch (ctx->null_cond.c) {
1969     case TCG_COND_NEVER:
1970         break;
1971     case TCG_COND_ALWAYS:
1972         tcg_gen_movi_reg(cpu_psw_n, 0);
1973         goto do_sigill;
1974     default:
1975         /* Since this is always the first (and only) insn within the
1976            TB, we should know the state of PSW[N] from TB->FLAGS.  */
1977         g_assert_not_reached();
1978     }
1979 
1980     /* Check that we didn't arrive here via some means that allowed
1981        non-sequential instruction execution.  Normally the PSW[B] bit
1982        detects this by preventing the B,GATE instruction from executing
1983        under such conditions.  */
1984     if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1985         goto do_sigill;
1986     }
1987 
1988     switch (ctx->iaoq_f & -4) {
1989     case 0x00: /* Null pointer call */
1990         gen_excp_1(EXCP_IMP);
1991         ctx->base.is_jmp = DISAS_NORETURN;
1992         break;
1993 
1994     case 0xb0: /* LWS */
1995         gen_excp_1(EXCP_SYSCALL_LWS);
1996         ctx->base.is_jmp = DISAS_NORETURN;
1997         break;
1998 
1999     case 0xe0: /* SET_THREAD_POINTER */
2000         tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2001         tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2002         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2003         ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2004         break;
2005 
2006     case 0x100: /* SYSCALL */
2007         gen_excp_1(EXCP_SYSCALL);
2008         ctx->base.is_jmp = DISAS_NORETURN;
2009         break;
2010 
2011     default:
2012     do_sigill:
2013         gen_excp_1(EXCP_ILL);
2014         ctx->base.is_jmp = DISAS_NORETURN;
2015         break;
2016     }
2017 }
2018 #endif
2019 
2020 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2021 {
2022     cond_free(&ctx->null_cond);
2023     return true;
2024 }
2025 
2026 static bool trans_break(DisasContext *ctx, arg_break *a)
2027 {
2028     return gen_excp_iir(ctx, EXCP_BREAK);
2029 }
2030 
2031 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2032 {
2033     /* No point in nullifying the memory barrier.  */
2034     tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2035 
2036     cond_free(&ctx->null_cond);
2037     return true;
2038 }
2039 
2040 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2041 {
2042     unsigned rt = a->t;
2043     TCGv_reg tmp = dest_gpr(ctx, rt);
2044     tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2045     save_gpr(ctx, rt, tmp);
2046 
2047     cond_free(&ctx->null_cond);
2048     return true;
2049 }
2050 
2051 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2052 {
2053     unsigned rt = a->t;
2054     unsigned rs = a->sp;
2055     TCGv_i64 t0 = tcg_temp_new_i64();
2056     TCGv_reg t1 = tcg_temp_new();
2057 
2058     load_spr(ctx, t0, rs);
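    /* The space id lives in the high 32 bits of the 64-bit space
       register (trans_mtsp below stores it there), so shift it down
       after loading.  */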
2059     tcg_gen_shri_i64(t0, t0, 32);
2060     tcg_gen_trunc_i64_reg(t1, t0);
2061 
2062     save_gpr(ctx, rt, t1);
2063 
2064     cond_free(&ctx->null_cond);
2065     return true;
2066 }
2067 
2068 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2069 {
2070     unsigned rt = a->t;
2071     unsigned ctl = a->r;
2072     TCGv_reg tmp;
2073 
2074     switch (ctl) {
2075     case CR_SAR:
2076 #ifdef TARGET_HPPA64
2077         if (a->e == 0) {
2078             /* MFSAR without ,W masks low 5 bits.  */
2079             tmp = dest_gpr(ctx, rt);
2080             tcg_gen_andi_reg(tmp, cpu_sar, 31);
2081             save_gpr(ctx, rt, tmp);
2082             goto done;
2083         }
2084 #endif
2085         save_gpr(ctx, rt, cpu_sar);
2086         goto done;
2087     case CR_IT: /* Interval Timer */
2088         /* FIXME: Respect PSW_S bit.  */
2089         nullify_over(ctx);
2090         tmp = dest_gpr(ctx, rt);
2091         if (translator_io_start(&ctx->base)) {
2092             gen_helper_read_interval_timer(tmp);
2093             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2094         } else {
2095             gen_helper_read_interval_timer(tmp);
2096         }
2097         save_gpr(ctx, rt, tmp);
2098         return nullify_end(ctx);
2099     case 26:
2100     case 27:
2101         break;
2102     default:
2103         /* All other control registers are privileged.  */
2104         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2105         break;
2106     }
2107 
2108     tmp = get_temp(ctx);
2109     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2110     save_gpr(ctx, rt, tmp);
2111 
2112  done:
2113     cond_free(&ctx->null_cond);
2114     return true;
2115 }
2116 
2117 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2118 {
2119     unsigned rr = a->r;
2120     unsigned rs = a->sp;
2121     TCGv_i64 t64;
2122 
2123     if (rs >= 5) {
2124         CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2125     }
2126     nullify_over(ctx);
2127 
2128     t64 = tcg_temp_new_i64();
2129     tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2130     tcg_gen_shli_i64(t64, t64, 32);
2131 
2132     if (rs >= 4) {
2133         tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2134         ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2135     } else {
2136         tcg_gen_mov_i64(cpu_sr[rs], t64);
2137     }
2138 
2139     return nullify_end(ctx);
2140 }
2141 
2142 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2143 {
2144     unsigned ctl = a->t;
2145     TCGv_reg reg;
2146     TCGv_reg tmp;
2147 
2148     if (ctl == CR_SAR) {
2149         reg = load_gpr(ctx, a->r);
2150         tmp = tcg_temp_new();
2151         tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2152         save_or_nullify(ctx, cpu_sar, tmp);
2153 
2154         cond_free(&ctx->null_cond);
2155         return true;
2156     }
2157 
2158     /* All other control registers are privileged or read-only.  */
2159     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2160 
2161 #ifndef CONFIG_USER_ONLY
2162     nullify_over(ctx);
2163     reg = load_gpr(ctx, a->r);
2164 
2165     switch (ctl) {
2166     case CR_IT:
2167         gen_helper_write_interval_timer(cpu_env, reg);
2168         break;
2169     case CR_EIRR:
2170         gen_helper_write_eirr(cpu_env, reg);
2171         break;
2172     case CR_EIEM:
2173         gen_helper_write_eiem(cpu_env, reg);
2174         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2175         break;
2176 
2177     case CR_IIASQ:
2178     case CR_IIAOQ:
2179         /* FIXME: Respect PSW_Q bit */
2180         /* The write advances the queue and stores to the back element.  */
2181         tmp = get_temp(ctx);
2182         tcg_gen_ld_reg(tmp, cpu_env,
2183                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2184         tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2185         tcg_gen_st_reg(reg, cpu_env,
2186                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2187         break;
2188 
2189     case CR_PID1:
2190     case CR_PID2:
2191     case CR_PID3:
2192     case CR_PID4:
2193         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2194 #ifndef CONFIG_USER_ONLY
2195         gen_helper_change_prot_id(cpu_env);
2196 #endif
2197         break;
2198 
2199     default:
2200         tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2201         break;
2202     }
2203     return nullify_end(ctx);
2204 #endif
2205 }
2206 
2207 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2208 {
2209     TCGv_reg tmp = tcg_temp_new();
2210 
2211     tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2212     tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2213     save_or_nullify(ctx, cpu_sar, tmp);
2214 
2215     cond_free(&ctx->null_cond);
2216     return true;
2217 }
2218 
2219 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2220 {
2221     TCGv_reg dest = dest_gpr(ctx, a->t);
2222 
2223 #ifdef CONFIG_USER_ONLY
2224     /* We don't implement space registers in user mode. */
2225     tcg_gen_movi_reg(dest, 0);
2226 #else
2227     TCGv_i64 t0 = tcg_temp_new_i64();
2228 
2229     tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2230     tcg_gen_shri_i64(t0, t0, 32);
2231     tcg_gen_trunc_i64_reg(dest, t0);
2232 #endif
2233     save_gpr(ctx, a->t, dest);
2234 
2235     cond_free(&ctx->null_cond);
2236     return true;
2237 }
2238 
2239 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2240 {
2241     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2242 #ifndef CONFIG_USER_ONLY
2243     TCGv_reg tmp;
2244 
2245     nullify_over(ctx);
2246 
2247     tmp = get_temp(ctx);
2248     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2249     tcg_gen_andi_reg(tmp, tmp, ~a->i);
2250     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2251     save_gpr(ctx, a->t, tmp);
2252 
2253     /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2254     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2255     return nullify_end(ctx);
2256 #endif
2257 }
2258 
2259 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2260 {
2261     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2262 #ifndef CONFIG_USER_ONLY
2263     TCGv_reg tmp;
2264 
2265     nullify_over(ctx);
2266 
2267     tmp = get_temp(ctx);
2268     tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2269     tcg_gen_ori_reg(tmp, tmp, a->i);
2270     gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2271     save_gpr(ctx, a->t, tmp);
2272 
2273     /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2274     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2275     return nullify_end(ctx);
2276 #endif
2277 }
2278 
2279 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2280 {
2281     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2282 #ifndef CONFIG_USER_ONLY
2283     TCGv_reg tmp, reg;
2284     nullify_over(ctx);
2285 
2286     reg = load_gpr(ctx, a->r);
2287     tmp = get_temp(ctx);
2288     gen_helper_swap_system_mask(tmp, cpu_env, reg);
2289 
2290     /* Exit the TB to recognize new interrupts.  */
2291     ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2292     return nullify_end(ctx);
2293 #endif
2294 }
2295 
2296 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2297 {
2298     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2299 #ifndef CONFIG_USER_ONLY
2300     nullify_over(ctx);
2301 
2302     if (rfi_r) {
2303         gen_helper_rfi_r(cpu_env);
2304     } else {
2305         gen_helper_rfi(cpu_env);
2306     }
2307     /* Exit the TB to recognize new interrupts.  */
2308     tcg_gen_exit_tb(NULL, 0);
2309     ctx->base.is_jmp = DISAS_NORETURN;
2310 
2311     return nullify_end(ctx);
2312 #endif
2313 }
2314 
2315 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2316 {
2317     return do_rfi(ctx, false);
2318 }
2319 
2320 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2321 {
2322     return do_rfi(ctx, true);
2323 }
2324 
2325 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2326 {
2327     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2328 #ifndef CONFIG_USER_ONLY
2329     nullify_over(ctx);
2330     gen_helper_halt(cpu_env);
2331     ctx->base.is_jmp = DISAS_NORETURN;
2332     return nullify_end(ctx);
2333 #endif
2334 }
2335 
2336 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2337 {
2338     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2339 #ifndef CONFIG_USER_ONLY
2340     nullify_over(ctx);
2341     gen_helper_reset(cpu_env);
2342     ctx->base.is_jmp = DISAS_NORETURN;
2343     return nullify_end(ctx);
2344 #endif
2345 }
2346 
2347 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2348 {
2349     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2350 #ifndef CONFIG_USER_ONLY
2351     nullify_over(ctx);
2352     gen_helper_getshadowregs(cpu_env);
2353     return nullify_end(ctx);
2354 #endif
2355 }
2356 
2357 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2358 {
2359     if (a->m) {
2360         TCGv_reg dest = dest_gpr(ctx, a->b);
2361         TCGv_reg src1 = load_gpr(ctx, a->b);
2362         TCGv_reg src2 = load_gpr(ctx, a->x);
2363 
2364         /* The only thing we need to do is the base register modification.  */
2365         tcg_gen_add_reg(dest, src1, src2);
2366         save_gpr(ctx, a->b, dest);
2367     }
2368     cond_free(&ctx->null_cond);
2369     return true;
2370 }
2371 
2372 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2373 {
2374     TCGv_reg dest, ofs;
2375     TCGv_i32 level, want;
2376     TCGv_tl addr;
2377 
2378     nullify_over(ctx);
2379 
2380     dest = dest_gpr(ctx, a->t);
2381     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2382 
2383     if (a->imm) {
2384         level = tcg_constant_i32(a->ri);
2385     } else {
2386         level = tcg_temp_new_i32();
2387         tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2388         tcg_gen_andi_i32(level, level, 3);
2389     }
2390     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2391 
2392     gen_helper_probe(dest, cpu_env, addr, level, want);
2393 
2394     save_gpr(ctx, a->t, dest);
2395     return nullify_end(ctx);
2396 }
2397 
2398 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2399 {
2400     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2401 #ifndef CONFIG_USER_ONLY
2402     TCGv_tl addr;
2403     TCGv_reg ofs, reg;
2404 
2405     nullify_over(ctx);
2406 
2407     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2408     reg = load_gpr(ctx, a->r);
2409     if (a->addr) {
2410         gen_helper_itlba(cpu_env, addr, reg);
2411     } else {
2412         gen_helper_itlbp(cpu_env, addr, reg);
2413     }
2414 
2415     /* Exit TB for TLB change if mmu is enabled.  */
2416     if (ctx->tb_flags & PSW_C) {
2417         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2418     }
2419     return nullify_end(ctx);
2420 #endif
2421 }
2422 
2423 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2424 {
2425     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2426 #ifndef CONFIG_USER_ONLY
2427     TCGv_tl addr;
2428     TCGv_reg ofs;
2429 
2430     nullify_over(ctx);
2431 
2432     form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2433     if (a->m) {
2434         save_gpr(ctx, a->b, ofs);
2435     }
2436     if (a->local) {
2437         gen_helper_ptlbe(cpu_env);
2438     } else {
2439         gen_helper_ptlb(cpu_env, addr);
2440     }
2441 
2442     /* Exit TB for TLB change if mmu is enabled.  */
2443     if (ctx->tb_flags & PSW_C) {
2444         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2445     }
2446     return nullify_end(ctx);
2447 #endif
2448 }
2449 
2450 /*
2451  * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2452  * See
2453  *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2454  *     page 13-9 (195/206)
2455  */
2456 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2457 {
2458     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2459 #ifndef CONFIG_USER_ONLY
2460     TCGv_tl addr, atl, stl;
2461     TCGv_reg reg;
2462 
2463     nullify_over(ctx);
2464 
2465     /*
2466      * FIXME:
2467      *  if (not (pcxl or pcxl2))
2468      *    return gen_illegal(ctx);
2469      *
2470      * Note for future: these are 32-bit systems; no hppa64.
2471      */
2472 
2473     atl = tcg_temp_new_tl();
2474     stl = tcg_temp_new_tl();
2475     addr = tcg_temp_new_tl();
2476 
2477     tcg_gen_ld32u_i64(stl, cpu_env,
2478                       a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2479                       : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2480     tcg_gen_ld32u_i64(atl, cpu_env,
2481                       a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2482                       : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2483     tcg_gen_shli_i64(stl, stl, 32);
2484     tcg_gen_or_tl(addr, atl, stl);
2485 
2486     reg = load_gpr(ctx, a->r);
2487     if (a->addr) {
2488         gen_helper_itlba(cpu_env, addr, reg);
2489     } else {
2490         gen_helper_itlbp(cpu_env, addr, reg);
2491     }
2492 
2493     /* Exit TB for TLB change if mmu is enabled.  */
2494     if (ctx->tb_flags & PSW_C) {
2495         ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2496     }
2497     return nullify_end(ctx);
2498 #endif
2499 }
2500 
2501 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2502 {
2503     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2504 #ifndef CONFIG_USER_ONLY
2505     TCGv_tl vaddr;
2506     TCGv_reg ofs, paddr;
2507 
2508     nullify_over(ctx);
2509 
2510     form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2511 
2512     paddr = tcg_temp_new();
2513     gen_helper_lpa(paddr, cpu_env, vaddr);
2514 
2515     /* Note that physical address result overrides base modification.  */
2516     if (a->m) {
2517         save_gpr(ctx, a->b, ofs);
2518     }
2519     save_gpr(ctx, a->t, paddr);
2520 
2521     return nullify_end(ctx);
2522 #endif
2523 }
2524 
2525 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2526 {
2527     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2528 
2529     /* The Coherence Index is an implementation-defined function of the
2530        physical address.  Two addresses with the same CI have a coherent
2531        view of the cache.  Our implementation is to return 0 for all addresses,
2532        since the entire address space is coherent.  */
2533     save_gpr(ctx, a->t, tcg_constant_reg(0));
2534 
2535     cond_free(&ctx->null_cond);
2536     return true;
2537 }
2538 
2539 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2540 {
2541     return do_add_reg(ctx, a, false, false, false, false);
2542 }
2543 
2544 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2545 {
2546     return do_add_reg(ctx, a, true, false, false, false);
2547 }
2548 
2549 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2550 {
2551     return do_add_reg(ctx, a, false, true, false, false);
2552 }
2553 
2554 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2555 {
2556     return do_add_reg(ctx, a, false, false, false, true);
2557 }
2558 
2559 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2560 {
2561     return do_add_reg(ctx, a, false, true, false, true);
2562 }
2563 
2564 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2565 {
2566     return do_sub_reg(ctx, a, false, false, false);
2567 }
2568 
2569 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2570 {
2571     return do_sub_reg(ctx, a, true, false, false);
2572 }
2573 
2574 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2575 {
2576     return do_sub_reg(ctx, a, false, false, true);
2577 }
2578 
2579 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2580 {
2581     return do_sub_reg(ctx, a, true, false, true);
2582 }
2583 
2584 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2585 {
2586     return do_sub_reg(ctx, a, false, true, false);
2587 }
2588 
2589 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2590 {
2591     return do_sub_reg(ctx, a, true, true, false);
2592 }
2593 
2594 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2595 {
2596     return do_log_reg(ctx, a, tcg_gen_andc_reg);
2597 }
2598 
2599 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2600 {
2601     return do_log_reg(ctx, a, tcg_gen_and_reg);
2602 }
2603 
2604 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2605 {
2606     if (a->cf == 0) {
2607         unsigned r2 = a->r2;
2608         unsigned r1 = a->r1;
2609         unsigned rt = a->t;
2610 
2611         if (rt == 0) { /* NOP */
2612             cond_free(&ctx->null_cond);
2613             return true;
2614         }
2615         if (r2 == 0) { /* COPY */
2616             if (r1 == 0) {
2617                 TCGv_reg dest = dest_gpr(ctx, rt);
2618                 tcg_gen_movi_reg(dest, 0);
2619                 save_gpr(ctx, rt, dest);
2620             } else {
2621                 save_gpr(ctx, rt, cpu_gr[r1]);
2622             }
2623             cond_free(&ctx->null_cond);
2624             return true;
2625         }
2626 #ifndef CONFIG_USER_ONLY
2627         /* These are QEMU extensions and are nops in the real architecture:
2628          *
2629          * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2630          * or %r31,%r31,%r31 -- death loop; offline cpu
2631          *                      currently implemented as idle.
2632          */
2633         if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2634             /* No need to check for supervisor, as userland can only pause
2635                until the next timer interrupt.  */
2636             nullify_over(ctx);
2637 
2638             /* Advance the instruction queue.  */
2639             copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2640             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2641             nullify_set(ctx, 0);
2642 
2643             /* Tell the qemu main loop to halt until this cpu has work.  */
2644             tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
2645                            offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2646             gen_excp_1(EXCP_HALTED);
2647             ctx->base.is_jmp = DISAS_NORETURN;
2648 
2649             return nullify_end(ctx);
2650         }
2651 #endif
2652     }
2653     return do_log_reg(ctx, a, tcg_gen_or_reg);
2654 }
2655 
2656 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2657 {
2658     return do_log_reg(ctx, a, tcg_gen_xor_reg);
2659 }
2660 
2661 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2662 {
2663     TCGv_reg tcg_r1, tcg_r2;
2664 
2665     if (a->cf) {
2666         nullify_over(ctx);
2667     }
2668     tcg_r1 = load_gpr(ctx, a->r1);
2669     tcg_r2 = load_gpr(ctx, a->r2);
2670     do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2671     return nullify_end(ctx);
2672 }
2673 
2674 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2675 {
2676     TCGv_reg tcg_r1, tcg_r2;
2677 
2678     if (a->cf) {
2679         nullify_over(ctx);
2680     }
2681     tcg_r1 = load_gpr(ctx, a->r1);
2682     tcg_r2 = load_gpr(ctx, a->r2);
2683     do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2684     return nullify_end(ctx);
2685 }
2686 
2687 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2688 {
2689     TCGv_reg tcg_r1, tcg_r2, tmp;
2690 
2691     if (a->cf) {
2692         nullify_over(ctx);
2693     }
2694     tcg_r1 = load_gpr(ctx, a->r1);
2695     tcg_r2 = load_gpr(ctx, a->r2);
2696     tmp = get_temp(ctx);
2697     tcg_gen_not_reg(tmp, tcg_r2);
2698     do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2699     return nullify_end(ctx);
2700 }
2701 
2702 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2703 {
2704     return do_uaddcm(ctx, a, false);
2705 }
2706 
2707 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2708 {
2709     return do_uaddcm(ctx, a, true);
2710 }
2711 
2712 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2713 {
2714     TCGv_reg tmp;
2715 
2716     nullify_over(ctx);
2717 
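    /* PSW[CB] records one carry bit per 4-bit digit, in bit 3 of each
       nibble.  Shifting it down and masking with 0x11111111 leaves one
       bit per digit (inverted for DCOR, direct for DCOR,I), and the
       multiply by 6 expands each set bit into the usual +/-6 BCD
       correction applied via do_unit below.  */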
2718     tmp = get_temp(ctx);
2719     tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2720     if (!is_i) {
2721         tcg_gen_not_reg(tmp, tmp);
2722     }
2723     tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2724     tcg_gen_muli_reg(tmp, tmp, 6);
2725     do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
2726             is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2727     return nullify_end(ctx);
2728 }
2729 
2730 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2731 {
2732     return do_dcor(ctx, a, false);
2733 }
2734 
2735 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2736 {
2737     return do_dcor(ctx, a, true);
2738 }
2739 
2740 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2741 {
2742     TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2743 
2744     nullify_over(ctx);
2745 
2746     in1 = load_gpr(ctx, a->r1);
2747     in2 = load_gpr(ctx, a->r2);
2748 
2749     add1 = tcg_temp_new();
2750     add2 = tcg_temp_new();
2751     addc = tcg_temp_new();
2752     dest = tcg_temp_new();
2753     zero = tcg_constant_reg(0);
2754 
2755     /* Form R1 << 1 | PSW[CB]{8}.  */
2756     tcg_gen_add_reg(add1, in1, in1);
2757     tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2758 
2759     /* Add or subtract R2, depending on PSW[V].  Proper computation of
2760        carry{8} requires that we subtract via + ~R2 + 1, as described in
2761        the manual.  By extracting and masking V, we can produce the
2762        proper inputs to the addition without movcond.  */
2763     tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2764     tcg_gen_xor_reg(add2, in2, addc);
2765     tcg_gen_andi_reg(addc, addc, 1);
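    /* Concretely: when PSW[V] is set, ADDC sign-extends to -1, so
       ADD2 = IN2 ^ -1 = ~IN2 and the masked ADDC supplies the +1,
       giving ADD1 + ~IN2 + 1 = ADD1 - IN2; when PSW[V] is clear,
       ADD2 = IN2 and ADDC = 0, a plain addition.  */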
2766     /* ??? This is only correct for 32-bit.  */
2767     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2768     tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2769 
2770     /* Write back the result register.  */
2771     save_gpr(ctx, a->t, dest);
2772 
2773     /* Write back PSW[CB].  */
2774     tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2775     tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2776 
2777     /* Write back PSW[V] for the division step.  */
2778     tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2779     tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2780 
2781     /* Install the new nullification.  */
2782     if (a->cf) {
2783         TCGv_reg sv = NULL;
2784         if (cond_need_sv(a->cf >> 1)) {
2785             /* ??? The lshift is supposed to contribute to overflow.  */
2786             sv = do_add_sv(ctx, dest, add1, add2);
2787         }
2788         ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2789     }
2790 
2791     return nullify_end(ctx);
2792 }
2793 
2794 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2795 {
2796     return do_add_imm(ctx, a, false, false);
2797 }
2798 
2799 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2800 {
2801     return do_add_imm(ctx, a, true, false);
2802 }
2803 
2804 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2805 {
2806     return do_add_imm(ctx, a, false, true);
2807 }
2808 
2809 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2810 {
2811     return do_add_imm(ctx, a, true, true);
2812 }
2813 
2814 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2815 {
2816     return do_sub_imm(ctx, a, false);
2817 }
2818 
2819 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2820 {
2821     return do_sub_imm(ctx, a, true);
2822 }
2823 
2824 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
2825 {
2826     TCGv_reg tcg_im, tcg_r2;
2827 
2828     if (a->cf) {
2829         nullify_over(ctx);
2830     }
2831 
2832     tcg_im = load_const(ctx, a->i);
2833     tcg_r2 = load_gpr(ctx, a->r);
2834     do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
2835 
2836     return nullify_end(ctx);
2837 }
2838 
2839 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2840 {
2841     if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2842         return gen_illegal(ctx);
2843     } else {
2844         return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2845                    a->disp, a->sp, a->m, a->size | MO_TE);
2846     }
2847 }
2848 
2849 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2850 {
2851     assert(a->x == 0 && a->scale == 0);
2852     if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2853         return gen_illegal(ctx);
2854     } else {
2855         return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2856     }
2857 }
2858 
2859 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2860 {
2861     MemOp mop = MO_TE | MO_ALIGN | a->size;
2862     TCGv_reg zero, dest, ofs;
2863     TCGv_tl addr;
2864 
2865     nullify_over(ctx);
2866 
2867     if (a->m) {
2868         /* Base register modification.  Make sure that if RT == RB,
2869            we see the result of the load.  */
2870         dest = get_temp(ctx);
2871     } else {
2872         dest = dest_gpr(ctx, a->t);
2873     }
2874 
2875     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2876              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2877 
2878     /*
2879      * For hppa1.1, LDCW is undefined unless aligned mod 16.
2880      * However, actual hardware succeeds when merely aligned mod 4.
2881      * Detect this case and log a GUEST_ERROR.
2882      *
2883      * TODO: HPPA64 relaxes the over-alignment requirement
2884      * with the ,co completer.
2885      */
2886     gen_helper_ldc_check(addr);
2887 
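    /* LDCW's load-and-clear semantics map directly onto an atomic
       exchange with zero.  */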
2888     zero = tcg_constant_reg(0);
2889     tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2890 
2891     if (a->m) {
2892         save_gpr(ctx, a->b, ofs);
2893     }
2894     save_gpr(ctx, a->t, dest);
2895 
2896     return nullify_end(ctx);
2897 }
2898 
2899 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2900 {
2901     TCGv_reg ofs, val;
2902     TCGv_tl addr;
2903 
2904     nullify_over(ctx);
2905 
2906     form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2907              ctx->mmu_idx == MMU_PHYS_IDX);
2908     val = load_gpr(ctx, a->r);
2909     if (a->a) {
2910         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2911             gen_helper_stby_e_parallel(cpu_env, addr, val);
2912         } else {
2913             gen_helper_stby_e(cpu_env, addr, val);
2914         }
2915     } else {
2916         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2917             gen_helper_stby_b_parallel(cpu_env, addr, val);
2918         } else {
2919             gen_helper_stby_b(cpu_env, addr, val);
2920         }
2921     }
2922     if (a->m) {
2923         tcg_gen_andi_reg(ofs, ofs, ~3);
2924         save_gpr(ctx, a->b, ofs);
2925     }
2926 
2927     return nullify_end(ctx);
2928 }
2929 
2930 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2931 {
2932     int hold_mmu_idx = ctx->mmu_idx;
2933 
2934     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2935     ctx->mmu_idx = MMU_PHYS_IDX;
2936     trans_ld(ctx, a);
2937     ctx->mmu_idx = hold_mmu_idx;
2938     return true;
2939 }
2940 
2941 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2942 {
2943     int hold_mmu_idx = ctx->mmu_idx;
2944 
2945     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2946     ctx->mmu_idx = MMU_PHYS_IDX;
2947     trans_st(ctx, a);
2948     ctx->mmu_idx = hold_mmu_idx;
2949     return true;
2950 }
2951 
2952 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
2953 {
2954     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2955 
2956     tcg_gen_movi_reg(tcg_rt, a->i);
2957     save_gpr(ctx, a->t, tcg_rt);
2958     cond_free(&ctx->null_cond);
2959     return true;
2960 }
2961 
2962 static bool trans_addil(DisasContext *ctx, arg_addil *a)
2963 {
2964     TCGv_reg tcg_rt = load_gpr(ctx, a->r);
2965     TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2966 
2967     tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
2968     save_gpr(ctx, 1, tcg_r1);
2969     cond_free(&ctx->null_cond);
2970     return true;
2971 }
2972 
2973 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
2974 {
2975     TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
2976 
2977     /* Special case rb == 0, for the LDI pseudo-op.
2978        The COPY pseudo-op is handled for free within tcg_gen_addi_reg.  */
2979     if (a->b == 0) {
2980         tcg_gen_movi_reg(tcg_rt, a->i);
2981     } else {
2982         tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
2983     }
2984     save_gpr(ctx, a->t, tcg_rt);
2985     cond_free(&ctx->null_cond);
2986     return true;
2987 }
2988 
2989 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2990                     unsigned c, unsigned f, unsigned n, int disp)
2991 {
2992     TCGv_reg dest, in2, sv;
2993     DisasCond cond;
2994 
2995     in2 = load_gpr(ctx, r);
2996     dest = get_temp(ctx);
2997 
2998     tcg_gen_sub_reg(dest, in1, in2);
2999 
3000     sv = NULL;
3001     if (cond_need_sv(c)) {
3002         sv = do_sub_sv(ctx, dest, in1, in2);
3003     }
3004 
3005     cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3006     return do_cbranch(ctx, disp, n, &cond);
3007 }
3008 
3009 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3010 {
3011     nullify_over(ctx);
3012     return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3013 }
3014 
3015 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3016 {
3017     nullify_over(ctx);
3018     return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3019 }
3020 
3021 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3022                     unsigned c, unsigned f, unsigned n, int disp)
3023 {
3024     TCGv_reg dest, in2, sv, cb_msb;
3025     DisasCond cond;
3026 
3027     in2 = load_gpr(ctx, r);
3028     dest = tcg_temp_new();
3029     sv = NULL;
3030     cb_msb = NULL;
3031 
3032     if (cond_need_cb(c)) {
3033         cb_msb = get_temp(ctx);
3034         tcg_gen_movi_reg(cb_msb, 0);
3035         tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
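        /* The double-word add leaves the carry-out in CB_MSB for the
           branch condition to consult.  */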
3036     } else {
3037         tcg_gen_add_reg(dest, in1, in2);
3038     }
3039     if (cond_need_sv(c)) {
3040         sv = do_add_sv(ctx, dest, in1, in2);
3041     }
3042 
3043     cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3044     save_gpr(ctx, r, dest);
3045     return do_cbranch(ctx, disp, n, &cond);
3046 }
3047 
3048 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3049 {
3050     nullify_over(ctx);
3051     return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3052 }
3053 
3054 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3055 {
3056     nullify_over(ctx);
3057     return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3058 }
3059 
3060 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3061 {
3062     TCGv_reg tmp, tcg_r;
3063     DisasCond cond;
3064 
3065     nullify_over(ctx);
3066 
3067     tmp = tcg_temp_new();
3068     tcg_r = load_gpr(ctx, a->r);
3069     tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
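    /* Shifting left by SAR moves the big-endian-numbered bit under test
       into the sign position, so the branch condition reduces to a
       signed comparison against zero.  */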
3070 
3071     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3072     return do_cbranch(ctx, a->disp, a->n, &cond);
3073 }
3074 
3075 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3076 {
3077     TCGv_reg tmp, tcg_r;
3078     DisasCond cond;
3079 
3080     nullify_over(ctx);
3081 
3082     tmp = tcg_temp_new();
3083     tcg_r = load_gpr(ctx, a->r);
3084     tcg_gen_shli_reg(tmp, tcg_r, a->p);
3085 
3086     cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3087     return do_cbranch(ctx, a->disp, a->n, &cond);
3088 }
3089 
3090 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3091 {
3092     TCGv_reg dest;
3093     DisasCond cond;
3094 
3095     nullify_over(ctx);
3096 
3097     dest = dest_gpr(ctx, a->r2);
3098     if (a->r1 == 0) {
3099         tcg_gen_movi_reg(dest, 0);
3100     } else {
3101         tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3102     }
3103 
3104     cond = do_sed_cond(a->c, dest);
3105     return do_cbranch(ctx, a->disp, a->n, &cond);
3106 }
3107 
3108 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3109 {
3110     TCGv_reg dest;
3111     DisasCond cond;
3112 
3113     nullify_over(ctx);
3114 
3115     dest = dest_gpr(ctx, a->r);
3116     tcg_gen_movi_reg(dest, a->i);
3117 
3118     cond = do_sed_cond(a->c, dest);
3119     return do_cbranch(ctx, a->disp, a->n, &cond);
3120 }
3121 
3122 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3123 {
3124     TCGv_reg dest;
3125 
3126     if (a->c) {
3127         nullify_over(ctx);
3128     }
3129 
3130     dest = dest_gpr(ctx, a->t);
3131     if (a->r1 == 0) {
3132         tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3133         tcg_gen_shr_reg(dest, dest, cpu_sar);
3134     } else if (a->r1 == a->r2) {
3135         TCGv_i32 t32 = tcg_temp_new_i32();
3136         tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3137         tcg_gen_rotr_i32(t32, t32, cpu_sar);
3138         tcg_gen_extu_i32_reg(dest, t32);
3139     } else {
3140         TCGv_i64 t = tcg_temp_new_i64();
3141         TCGv_i64 s = tcg_temp_new_i64();
3142 
3143         tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3144         tcg_gen_extu_reg_i64(s, cpu_sar);
3145         tcg_gen_shr_i64(t, t, s);
3146         tcg_gen_trunc_i64_reg(dest, t);
3147     }
3148     save_gpr(ctx, a->t, dest);
3149 
3150     /* Install the new nullification.  */
3151     cond_free(&ctx->null_cond);
3152     if (a->c) {
3153         ctx->null_cond = do_sed_cond(a->c, dest);
3154     }
3155     return nullify_end(ctx);
3156 }
3157 
3158 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3159 {
3160     unsigned sa = 31 - a->cpos;
3161     TCGv_reg dest, t2;
3162 
3163     if (a->c) {
3164         nullify_over(ctx);
3165     }
3166 
3167     dest = dest_gpr(ctx, a->t);
3168     t2 = load_gpr(ctx, a->r2);
3169     if (a->r1 == 0) {
3170         tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3171     } else if (TARGET_REGISTER_BITS == 32) {
3172         tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3173     } else if (a->r1 == a->r2) {
3174         TCGv_i32 t32 = tcg_temp_new_i32();
3175         tcg_gen_trunc_reg_i32(t32, t2);
3176         tcg_gen_rotri_i32(t32, t32, sa);
3177         tcg_gen_extu_i32_reg(dest, t32);
3178     } else {
3179         TCGv_i64 t64 = tcg_temp_new_i64();
3180         tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3181         tcg_gen_shri_i64(t64, t64, sa);
3182         tcg_gen_trunc_i64_reg(dest, t64);
3183     }
3184     save_gpr(ctx, a->t, dest);
3185 
3186     /* Install the new nullification.  */
3187     cond_free(&ctx->null_cond);
3188     if (a->c) {
3189         ctx->null_cond = do_sed_cond(a->c, dest);
3190     }
3191     return nullify_end(ctx);
3192 }
3193 
3194 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3195 {
3196     unsigned len = 32 - a->clen;
3197     TCGv_reg dest, src, tmp;
3198 
3199     if (a->c) {
3200         nullify_over(ctx);
3201     }
3202 
3203     dest = dest_gpr(ctx, a->t);
3204     src = load_gpr(ctx, a->r);
3205     tmp = tcg_temp_new();
3206 
3207     /* Recall that SAR uses big-endian bit numbering.  */
3208     tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
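    /* E.g. with 32-bit registers, big-endian bit 5 in SAR becomes
       5 ^ 31 == 26, the equivalent little-endian shift count.  */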
3209     if (a->se) {
3210         tcg_gen_sar_reg(dest, src, tmp);
3211         tcg_gen_sextract_reg(dest, dest, 0, len);
3212     } else {
3213         tcg_gen_shr_reg(dest, src, tmp);
3214         tcg_gen_extract_reg(dest, dest, 0, len);
3215     }
3216     save_gpr(ctx, a->t, dest);
3217 
3218     /* Install the new nullification.  */
3219     cond_free(&ctx->null_cond);
3220     if (a->c) {
3221         ctx->null_cond = do_sed_cond(a->c, dest);
3222     }
3223     return nullify_end(ctx);
3224 }
3225 
3226 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3227 {
3228     unsigned len = 32 - a->clen;
3229     unsigned cpos = 31 - a->pos;
3230     TCGv_reg dest, src;
3231 
3232     if (a->c) {
3233         nullify_over(ctx);
3234     }
3235 
3236     dest = dest_gpr(ctx, a->t);
3237     src = load_gpr(ctx, a->r);
3238     if (a->se) {
3239         tcg_gen_sextract_reg(dest, src, cpos, len);
3240     } else {
3241         tcg_gen_extract_reg(dest, src, cpos, len);
3242     }
3243     save_gpr(ctx, a->t, dest);
3244 
3245     /* Install the new nullification.  */
3246     cond_free(&ctx->null_cond);
3247     if (a->c) {
3248         ctx->null_cond = do_sed_cond(a->c, dest);
3249     }
3250     return nullify_end(ctx);
3251 }
3252 
3253 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3254 {
3255     unsigned len = 32 - a->clen;
3256     target_sreg mask0, mask1;
3257     TCGv_reg dest;
3258 
3259     if (a->c) {
3260         nullify_over(ctx);
3261     }
3262     if (a->cpos + len > 32) {
3263         len = 32 - a->cpos;
3264     }
3265 
3266     dest = dest_gpr(ctx, a->t);
3267     mask0 = deposit64(0, a->cpos, len, a->i);
3268     mask1 = deposit64(-1, a->cpos, len, a->i);
3269 
3270     if (a->nz) {
3271         TCGv_reg src = load_gpr(ctx, a->t);
3272         if (mask1 != -1) {
3273             tcg_gen_andi_reg(dest, src, mask1);
3274             src = dest;
3275         }
3276         tcg_gen_ori_reg(dest, src, mask0);
3277     } else {
3278         tcg_gen_movi_reg(dest, mask0);
3279     }
3280     save_gpr(ctx, a->t, dest);
3281 
3282     /* Install the new nullification.  */
3283     cond_free(&ctx->null_cond);
3284     if (a->c) {
3285         ctx->null_cond = do_sed_cond(a->c, dest);
3286     }
3287     return nullify_end(ctx);
3288 }
3289 
3290 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3291 {
3292     unsigned rs = a->nz ? a->t : 0;
3293     unsigned len = 32 - a->clen;
3294     TCGv_reg dest, val;
3295 
3296     if (a->c) {
3297         nullify_over(ctx);
3298     }
3299     if (a->cpos + len > 32) {
3300         len = 32 - a->cpos;
3301     }
3302 
3303     dest = dest_gpr(ctx, a->t);
3304     val = load_gpr(ctx, a->r);
3305     if (rs == 0) {
3306         tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3307     } else {
3308         tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3309     }
3310     save_gpr(ctx, a->t, dest);
3311 
3312     /* Install the new nullification.  */
3313     cond_free(&ctx->null_cond);
3314     if (a->c) {
3315         ctx->null_cond = do_sed_cond(a->c, dest);
3316     }
3317     return nullify_end(ctx);
3318 }
3319 
3320 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3321                         unsigned nz, unsigned clen, TCGv_reg val)
3322 {
3323     unsigned rs = nz ? rt : 0;
3324     unsigned len = 32 - clen;
3325     TCGv_reg mask, tmp, shift, dest;
3326     unsigned msb = 1U << (len - 1);
3327 
3328     dest = dest_gpr(ctx, rt);
3329     shift = tcg_temp_new();
3330     tmp = tcg_temp_new();
3331 
3332     /* Convert big-endian bit numbering in SAR to left-shift.  */
3333     tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3334 
3335     mask = tcg_temp_new();
3336     tcg_gen_movi_reg(mask, msb + (msb - 1));
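    /* Note msb + (msb - 1) == (1 << len) - 1, composed this way so that
       len == 32 does not overflow the shift.  */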
3337     tcg_gen_and_reg(tmp, val, mask);
3338     if (rs) {
3339         tcg_gen_shl_reg(mask, mask, shift);
3340         tcg_gen_shl_reg(tmp, tmp, shift);
3341         tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3342         tcg_gen_or_reg(dest, dest, tmp);
3343     } else {
3344         tcg_gen_shl_reg(dest, tmp, shift);
3345     }
3346     save_gpr(ctx, rt, dest);
3347 
3348     /* Install the new nullification.  */
3349     cond_free(&ctx->null_cond);
3350     if (c) {
3351         ctx->null_cond = do_sed_cond(c, dest);
3352     }
3353     return nullify_end(ctx);
3354 }
3355 
3356 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3357 {
3358     if (a->c) {
3359         nullify_over(ctx);
3360     }
3361     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3362 }
3363 
3364 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3365 {
3366     if (a->c) {
3367         nullify_over(ctx);
3368     }
3369     return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3370 }
3371 
3372 static bool trans_be(DisasContext *ctx, arg_be *a)
3373 {
3374     TCGv_reg tmp;
3375 
3376 #ifdef CONFIG_USER_ONLY
3377     /* ??? It seems like there should be a good way of using
3378        "be disp(sr2, r0)", the canonical gateway entry mechanism
3379        to our advantage.  But that appears to be inconvenient to
3380        manage alongside branch delay slots.  Therefore we handle
3381        entry into the gateway page via absolute address.  */
3382     /* Since we don't implement spaces, just branch.  Do notice the special
3383        case of "be disp(*,r0)" using a direct branch to disp, so that we can
3384        goto_tb to the TB containing the syscall.  */
3385     if (a->b == 0) {
3386         return do_dbranch(ctx, a->disp, a->l, a->n);
3387     }
3388 #else
3389     nullify_over(ctx);
3390 #endif
3391 
3392     tmp = get_temp(ctx);
3393     tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3394     tmp = do_ibranch_priv(ctx, tmp);
3395 
3396 #ifdef CONFIG_USER_ONLY
3397     return do_ibranch(ctx, tmp, a->l, a->n);
3398 #else
3399     TCGv_i64 new_spc = tcg_temp_new_i64();
3400 
3401     load_spr(ctx, new_spc, a->sp);
3402     if (a->l) {
3403         copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3404         tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3405     }
3406     if (a->n && use_nullify_skip(ctx)) {
3407         tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3408         tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3409         tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3410         tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3411     } else {
3412         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3413         if (ctx->iaoq_b == -1) {
3414             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3415         }
3416         tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3417         tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3418         nullify_set(ctx, a->n);
3419     }
3420     tcg_gen_lookup_and_goto_ptr();
3421     ctx->base.is_jmp = DISAS_NORETURN;
3422     return nullify_end(ctx);
3423 #endif
3424 }
3425 
3426 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3427 {
3428     return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3429 }
3430 
3431 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3432 {
3433     target_ureg dest = iaoq_dest(ctx, a->disp);
3434 
3435     nullify_over(ctx);
3436 
3437     /* Make sure the caller hasn't done something weird with the queue.
3438      * ??? This is not quite the same as the PSW[B] bit, which would be
3439      * expensive to track.  Real hardware will trap for
3440      *    b  gateway
3441      *    b  gateway+4  (in delay slot of first branch)
3442      * However, checking for a non-sequential instruction queue *will*
3443      * diagnose the security hole
3444      *    b  gateway
3445      *    b  evil
3446      * in which instructions at evil would run with increased privs.
3447      */
3448     if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3449         return gen_illegal(ctx);
3450     }
3451 
3452 #ifndef CONFIG_USER_ONLY
3453     if (ctx->tb_flags & PSW_C) {
3454         CPUHPPAState *env = ctx->cs->env_ptr;
3455         int type = hppa_artype_for_page(env, ctx->base.pc_next);
3456         /* If we could not find a TLB entry, then we need to generate an
3457            ITLB miss exception so the kernel will provide it.
3458            The resulting TLB fill operation will invalidate this TB and
3459            we will re-translate, at which point we *will* be able to find
3460            the TLB entry and determine if this is in fact a gateway page.  */
3461         if (type < 0) {
3462             gen_excp(ctx, EXCP_ITLB_MISS);
3463             return true;
3464         }
3465         /* No change for non-gateway pages or for priv decrease.  */
3466         if (type >= 4 && type - 4 < ctx->privilege) {
3467             dest = deposit32(dest, 0, 2, type - 4);
3468         }
3469     } else {
3470         dest &= -4;  /* priv = 0 */
3471     }
3472 #endif
3473 
3474     if (a->l) {
3475         TCGv_reg tmp = dest_gpr(ctx, a->l);
3476         if (ctx->privilege < 3) {
3477             tcg_gen_andi_reg(tmp, tmp, -4);
3478         }
3479         tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3480         save_gpr(ctx, a->l, tmp);
3481     }
3482 
3483     return do_dbranch(ctx, dest, 0, a->n);
3484 }
3485 
3486 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3487 {
3488     if (a->x) {
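             /* The branch target is iaoq_f + 8 + GR[x] * 8: the index
                register is scaled by the size of two insn words.  */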
3489         TCGv_reg tmp = get_temp(ctx);
3490         tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3491         tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3492         /* The computation here never changes privilege level.  */
3493         return do_ibranch(ctx, tmp, a->l, a->n);
3494     } else {
3495         /* BLR R0,RX is a good way to load PC+8 into RX.  */
3496         return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3497     }
3498 }
3499 
3500 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3501 {
3502     TCGv_reg dest;
3503 
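         /* BV branches to GR[b], or to GR[b] + GR[x] * 8 when an index
            register is supplied.  */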
3504     if (a->x == 0) {
3505         dest = load_gpr(ctx, a->b);
3506     } else {
3507         dest = get_temp(ctx);
3508         tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3509         tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3510     }
3511     dest = do_ibranch_priv(ctx, dest);
3512     return do_ibranch(ctx, dest, 0, a->n);
3513 }
3514 
3515 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3516 {
3517     TCGv_reg dest;
3518 
3519 #ifdef CONFIG_USER_ONLY
3520     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3521     return do_ibranch(ctx, dest, a->l, a->n);
3522 #else
3523     nullify_over(ctx);
3524     dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3525 
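         /* Advance the front of the IA queue to the old back, then
            install the new offset and its space at the back.  */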
3526     copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3527     if (ctx->iaoq_b == -1) {
3528         tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3529     }
3530     copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3531     tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3532     if (a->l) {
3533         copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3534     }
3535     nullify_set(ctx, a->n);
3536     tcg_gen_lookup_and_goto_ptr();
3537     ctx->base.is_jmp = DISAS_NORETURN;
3538     return nullify_end(ctx);
3539 #endif
3540 }
3541 
3542 /*
3543  * Float class 0
3544  */
3545 
3546 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3547 {
3548     tcg_gen_mov_i32(dst, src);
3549 }
3550 
3551 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3552 {
3553     uint64_t ret;
3554 
3555     if (TARGET_REGISTER_BITS == 64) {
3556         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3557     } else {
3558         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3559     }
3560 
3561     nullify_over(ctx);
3562     save_frd(0, tcg_constant_i64(ret));
3563     return nullify_end(ctx);
3564 }
3565 
3566 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3567 {
3568     return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3569 }
3570 
3571 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3572 {
3573     tcg_gen_mov_i64(dst, src);
3574 }
3575 
3576 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3577 {
3578     return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3579 }
3580 
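     /* fabs, fneg and fnegabs are pure IEEE sign-bit manipulations,
        so they are implemented inline with and/xor/or instead of
        calling an FPU helper.  */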
3581 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3582 {
3583     tcg_gen_andi_i32(dst, src, INT32_MAX);
3584 }
3585 
3586 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3587 {
3588     return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3589 }
3590 
3591 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3592 {
3593     tcg_gen_andi_i64(dst, src, INT64_MAX);
3594 }
3595 
3596 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3597 {
3598     return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3599 }
3600 
3601 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3602 {
3603     return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3604 }
3605 
3606 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3607 {
3608     return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3609 }
3610 
3611 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3612 {
3613     return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3614 }
3615 
3616 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3617 {
3618     return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3619 }
3620 
3621 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3622 {
3623     tcg_gen_xori_i32(dst, src, INT32_MIN);
3624 }
3625 
3626 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3627 {
3628     return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3629 }
3630 
3631 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3632 {
3633     tcg_gen_xori_i64(dst, src, INT64_MIN);
3634 }
3635 
3636 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3637 {
3638     return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3639 }
3640 
3641 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3642 {
3643     tcg_gen_ori_i32(dst, src, INT32_MIN);
3644 }
3645 
3646 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3647 {
3648     return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3649 }
3650 
3651 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3652 {
3653     tcg_gen_ori_i64(dst, src, INT64_MIN);
3654 }
3655 
3656 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3657 {
3658     return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3659 }
3660 
3661 /*
3662  * Float class 1
3663  */
3664 
3665 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3666 {
3667     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3668 }
3669 
3670 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3671 {
3672     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3673 }
3674 
3675 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3676 {
3677     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3678 }
3679 
3680 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3681 {
3682     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3683 }
3684 
3685 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3686 {
3687     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3688 }
3689 
3690 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3691 {
3692     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3693 }
3694 
3695 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3696 {
3697     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3698 }
3699 
3700 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3701 {
3702     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3703 }
3704 
3705 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3706 {
3707     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3708 }
3709 
3710 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3711 {
3712     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3713 }
3714 
3715 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3716 {
3717     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3718 }
3719 
3720 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3721 {
3722     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3723 }
3724 
3725 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3726 {
3727     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3728 }
3729 
3730 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3731 {
3732     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3733 }
3734 
3735 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3736 {
3737     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3738 }
3739 
3740 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3741 {
3742     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3743 }
3744 
3745 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3746 {
3747     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3748 }
3749 
3750 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3751 {
3752     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3753 }
3754 
3755 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3756 {
3757     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3758 }
3759 
3760 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3761 {
3762     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3763 }
3764 
3765 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3766 {
3767     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3768 }
3769 
3770 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3771 {
3772     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3773 }
3774 
3775 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3776 {
3777     return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3778 }
3779 
3780 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3781 {
3782     return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3783 }
3784 
3785 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3786 {
3787     return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3788 }
3789 
3790 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3791 {
3792     return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3793 }
3794 
3795 /*
3796  * Float class 2
3797  */
3798 
3799 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
3800 {
3801     TCGv_i32 ta, tb, tc, ty;
3802 
3803     nullify_over(ctx);
3804 
3805     ta = load_frw0_i32(a->r1);
3806     tb = load_frw0_i32(a->r2);
3807     ty = tcg_constant_i32(a->y);
3808     tc = tcg_constant_i32(a->c);
3809 
3810     gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3811 
3812     return nullify_end(ctx);
3813 }
3814 
3815 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
3816 {
3817     TCGv_i64 ta, tb;
3818     TCGv_i32 tc, ty;
3819 
3820     nullify_over(ctx);
3821 
3822     ta = load_frd0(a->r1);
3823     tb = load_frd0(a->r2);
3824     ty = tcg_constant_i32(a->y);
3825     tc = tcg_constant_i32(a->c);
3826 
3827     gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3828 
3829     return nullify_end(ctx);
3830 }
3831 
3832 static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
3833 {
3834     TCGv_reg t;
3835 
3836     nullify_over(ctx);
3837 
3838     t = get_temp(ctx);
3839     tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3840 
3841     if (a->y == 1) {
3842         int mask;
3843         bool inv = false;
3844 
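             /* Each mask selects the C bit (bit 26 of fr0_shadow) plus
                the newest entries of the compare queue, so the accN and
                rejN conditions test the last N compare results.  */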
3845         switch (a->c) {
3846         case 0: /* simple */
3847             tcg_gen_andi_reg(t, t, 0x4000000);
3848             ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3849             goto done;
3850         case 2: /* rej */
3851             inv = true;
3852             /* fallthru */
3853         case 1: /* acc */
3854             mask = 0x43ff800;
3855             break;
3856         case 6: /* rej8 */
3857             inv = true;
3858             /* fallthru */
3859         case 5: /* acc8 */
3860             mask = 0x43f8000;
3861             break;
3862         case 9: /* acc6 */
3863             mask = 0x43e0000;
3864             break;
3865         case 13: /* acc4 */
3866             mask = 0x4380000;
3867             break;
3868         case 17: /* acc2 */
3869             mask = 0x4200000;
3870             break;
3871         default:
3872             gen_illegal(ctx);
3873             return true;
3874         }
3875         if (inv) {
3876             TCGv_reg c = load_const(ctx, mask);
3877             tcg_gen_or_reg(t, t, c);
3878             ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3879         } else {
3880             tcg_gen_andi_reg(t, t, mask);
3881             ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3882         }
3883     } else {
3884         unsigned cbit = (a->y ^ 1) - 1;
3885 
3886         tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3887         ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3888     }
3889 
3890  done:
3891     return nullify_end(ctx);
3892 }
3893 
3894 /*
3895  * Float class 3
3896  */
3897 
3898 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
3899 {
3900     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3901 }
3902 
3903 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3904 {
3905     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3906 }
3907 
3908 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3909 {
3910     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3911 }
3912 
3913 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3914 {
3915     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
3916 }
3917 
3918 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
3919 {
3920     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3921 }
3922 
3923 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3924 {
3925     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3926 }
3927 
3928 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3929 {
3930     return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3931 }
3932 
3933 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3934 {
3935     return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3936 }
3937 
3938 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3939 {
3940     TCGv_i64 x, y;
3941 
3942     nullify_over(ctx);
3943 
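         /* XMPYU is an unsigned 32 x 32 -> 64 multiply in the FP
            registers; load_frw0_i64 zero-extends, so a plain 64-bit
            multiply yields the full product.  */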
3944     x = load_frw0_i64(a->r1);
3945     y = load_frw0_i64(a->r2);
3946     tcg_gen_mul_i64(x, x, y);
3947     save_frd(a->t, x);
3948 
3949     return nullify_end(ctx);
3950 }
3951 
3952 /* Convert the fmpyadd single-precision register encodings to standard.  */
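     /* The 5-bit field selects fr16-fr31, with bit 4 choosing the
        register half; move that bit up to the standard bit-5
        half-select position.  */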
3953 static inline int fmpyadd_s_reg(unsigned r)
3954 {
3955     return (r & 16) * 2 + 16 + (r & 15);
3956 }
3957 
3958 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3959 {
3960     int tm = fmpyadd_s_reg(a->tm);
3961     int ra = fmpyadd_s_reg(a->ra);
3962     int ta = fmpyadd_s_reg(a->ta);
3963     int rm2 = fmpyadd_s_reg(a->rm2);
3964     int rm1 = fmpyadd_s_reg(a->rm1);
3965 
3966     nullify_over(ctx);
3967 
3968     do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
3969     do_fop_weww(ctx, ta, ta, ra,
3970                 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
3971 
3972     return nullify_end(ctx);
3973 }
3974 
3975 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
3976 {
3977     return do_fmpyadd_s(ctx, a, false);
3978 }
3979 
3980 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
3981 {
3982     return do_fmpyadd_s(ctx, a, true);
3983 }
3984 
3985 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
3986 {
3987     nullify_over(ctx);
3988 
3989     do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
3990     do_fop_dedd(ctx, a->ta, a->ta, a->ra,
3991                 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
3992 
3993     return nullify_end(ctx);
3994 }
3995 
3996 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
3997 {
3998     return do_fmpyadd_d(ctx, a, false);
3999 }
4000 
4001 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4002 {
4003     return do_fmpyadd_d(ctx, a, true);
4004 }
4005 
4006 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4007 {
4008     TCGv_i32 x, y, z;
4009 
4010     nullify_over(ctx);
4011     x = load_frw0_i32(a->rm1);
4012     y = load_frw0_i32(a->rm2);
4013     z = load_frw0_i32(a->ra3);
4014 
4015     if (a->neg) {
4016         gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
4017     } else {
4018         gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
4019     }
4020 
4021     save_frw_i32(a->t, x);
4022     return nullify_end(ctx);
4023 }
4024 
4025 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4026 {
4027     TCGv_i64 x, y, z;
4028 
4029     nullify_over(ctx);
4030     x = load_frd0(a->rm1);
4031     y = load_frd0(a->rm2);
4032     z = load_frd0(a->ra3);
4033 
4034     if (a->neg) {
4035         gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
4036     } else {
4037         gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
4038     }
4039 
4040     save_frd(a->t, x);
4041     return nullify_end(ctx);
4042 }
4043 
4044 static bool trans_diag(DisasContext *ctx, arg_diag *a)
4045 {
4046     qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4047     cond_free(&ctx->null_cond);
4048     return true;
4049 }
4050 
4051 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4052 {
4053     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4054     int bound;
4055 
4056     ctx->cs = cs;
4057     ctx->tb_flags = ctx->base.tb->flags;
4058 
4059 #ifdef CONFIG_USER_ONLY
4060     ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4061     ctx->mmu_idx = MMU_USER_IDX;
4062     ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4063     ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4064     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4065 #else
4066     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4067     ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
4068                     PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);
4069 
4070     /* Recover the IAOQ values from the GVA + PRIV.  */
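         /* cs_base packs the front space in its high 32 bits and the
            signed distance from iaoq_f to iaoq_b in its low 32 bits;
            a zero distance means iaoq_b is unknown.  */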
4071     uint64_t cs_base = ctx->base.tb->cs_base;
4072     uint64_t iasq_f = cs_base & ~0xffffffffull;
4073     int32_t diff = cs_base;
4074 
4075     ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4076     ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4077 #endif
4078     ctx->iaoq_n = -1;
4079     ctx->iaoq_n_var = NULL;
4080 
4081     /* Bound the number of instructions by those left on the page.  */
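         /* -(pc_first | TARGET_PAGE_MASK) is the number of bytes from
            pc_first to the end of the page.  */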
4082     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4083     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4084 
4085     ctx->ntempr = 0;
4086     ctx->ntempl = 0;
4087     memset(ctx->tempr, 0, sizeof(ctx->tempr));
4088     memset(ctx->templ, 0, sizeof(ctx->templ));
4089 }
4090 
4091 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4092 {
4093     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4094 
4095     /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4096     ctx->null_cond = cond_make_f();
4097     ctx->psw_n_nonzero = false;
4098     if (ctx->tb_flags & PSW_N) {
4099         ctx->null_cond.c = TCG_COND_ALWAYS;
4100         ctx->psw_n_nonzero = true;
4101     }
4102     ctx->null_lab = NULL;
4103 }
4104 
4105 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4106 {
4107     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4108 
4109     tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4110 }
4111 
4112 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4113 {
4114     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4115     CPUHPPAState *env = cs->env_ptr;
4116     DisasJumpType ret;
4117     int i, n;
4118 
4119     /* Execute one insn.  */
4120 #ifdef CONFIG_USER_ONLY
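         /* Page zero holds the magic kernel entry points (syscall,
            light-weight syscall, set-thread-pointer), emulated inline
            by do_page_zero.  */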
4121     if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4122         do_page_zero(ctx);
4123         ret = ctx->base.is_jmp;
4124         assert(ret != DISAS_NEXT);
4125     } else
4126 #endif
4127     {
4128         /* Always fetch the insn, even if nullified, so that we check
4129            the page permissions for execute.  */
4130         uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4131 
4132         /* Set up the IA queue for the next insn.
4133            This will be overwritten by a branch.  */
4134         if (ctx->iaoq_b == -1) {
4135             ctx->iaoq_n = -1;
4136             ctx->iaoq_n_var = get_temp(ctx);
4137             tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4138         } else {
4139             ctx->iaoq_n = ctx->iaoq_b + 4;
4140             ctx->iaoq_n_var = NULL;
4141         }
4142 
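             /* This insn is known to be nullified: consume the
                nullification and skip decode; the fetch above already
                checked execute permission.  */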
4143         if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4144             ctx->null_cond.c = TCG_COND_NEVER;
4145             ret = DISAS_NEXT;
4146         } else {
4147             ctx->insn = insn;
4148             if (!decode(ctx, insn)) {
4149                 gen_illegal(ctx);
4150             }
4151             ret = ctx->base.is_jmp;
4152             assert(ctx->null_lab == NULL);
4153         }
4154     }
4155 
4156     /* Forget any temporaries allocated.  */
4157     for (i = 0, n = ctx->ntempr; i < n; ++i) {
4158         ctx->tempr[i] = NULL;
4159     }
4160     for (i = 0, n = ctx->ntempl; i < n; ++i) {
4161         ctx->templ[i] = NULL;
4162     }
4163     ctx->ntempr = 0;
4164     ctx->ntempl = 0;
4165 
4166     /* Advance the insn queue.  Note that this check also detects
4167        a privilege level change within the instruction queue.  */
4168     if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4169         if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4170             && use_goto_tb(ctx, ctx->iaoq_b)
4171             && (ctx->null_cond.c == TCG_COND_NEVER
4172                 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4173             nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4174             gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4175             ctx->base.is_jmp = ret = DISAS_NORETURN;
4176         } else {
4177             ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4178         }
4179     }
4180     ctx->iaoq_f = ctx->iaoq_b;
4181     ctx->iaoq_b = ctx->iaoq_n;
4182     ctx->base.pc_next += 4;
4183 
4184     switch (ret) {
4185     case DISAS_NORETURN:
4186     case DISAS_IAQ_N_UPDATED:
4187         break;
4188 
4189     case DISAS_NEXT:
4190     case DISAS_IAQ_N_STALE:
4191     case DISAS_IAQ_N_STALE_EXIT:
4192         if (ctx->iaoq_f == -1) {
4193             tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4194             copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4195 #ifndef CONFIG_USER_ONLY
4196             tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4197 #endif
4198             nullify_save(ctx);
4199             ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4200                                 ? DISAS_EXIT
4201                                 : DISAS_IAQ_N_UPDATED);
4202         } else if (ctx->iaoq_b == -1) {
4203             tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4204         }
4205         break;
4206 
4207     default:
4208         g_assert_not_reached();
4209     }
4210 }
4211 
4212 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4213 {
4214     DisasContext *ctx = container_of(dcbase, DisasContext, base);
4215     DisasJumpType is_jmp = ctx->base.is_jmp;
4216 
4217     switch (is_jmp) {
4218     case DISAS_NORETURN:
4219         break;
4220     case DISAS_TOO_MANY:
4221     case DISAS_IAQ_N_STALE:
4222     case DISAS_IAQ_N_STALE_EXIT:
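             /* Write the (possibly constant) IAOQ values and the
                nullification state back to the CPU before leaving.  */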
4223         copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4224         copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4225         nullify_save(ctx);
4226         /* FALLTHRU */
4227     case DISAS_IAQ_N_UPDATED:
4228         if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4229             tcg_gen_lookup_and_goto_ptr();
4230             break;
4231         }
4232         /* FALLTHRU */
4233     case DISAS_EXIT:
4234         tcg_gen_exit_tb(NULL, 0);
4235         break;
4236     default:
4237         g_assert_not_reached();
4238     }
4239 }
4240 
4241 static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4242                               CPUState *cs, FILE *logfile)
4243 {
4244     target_ulong pc = dcbase->pc_first;
4245 
4246 #ifdef CONFIG_USER_ONLY
4247     switch (pc) {
4248     case 0x00:
4249         fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4250         return;
4251     case 0xb0:
4252         fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4253         return;
4254     case 0xe0:
4255         fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4256         return;
4257     case 0x100:
4258         fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4259         return;
4260     }
4261 #endif
4262 
4263     fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4264     target_disas(logfile, cs, pc, dcbase->tb->size);
4265 }
4266 
4267 static const TranslatorOps hppa_tr_ops = {
4268     .init_disas_context = hppa_tr_init_disas_context,
4269     .tb_start           = hppa_tr_tb_start,
4270     .insn_start         = hppa_tr_insn_start,
4271     .translate_insn     = hppa_tr_translate_insn,
4272     .tb_stop            = hppa_tr_tb_stop,
4273     .disas_log          = hppa_tr_disas_log,
4274 };
4275 
4276 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4277                            target_ulong pc, void *host_pc)
4278 {
4279     DisasContext ctx;
4280     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4281 }
4282