1 /* 2 * HPPA emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net> 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "disas/disas.h" 23 #include "qemu/host-utils.h" 24 #include "exec/exec-all.h" 25 #include "tcg/tcg-op.h" 26 #include "exec/cpu_ldst.h" 27 #include "exec/helper-proto.h" 28 #include "exec/helper-gen.h" 29 #include "exec/translator.h" 30 #include "exec/log.h" 31 32 /* Since we have a distinction between register size and address size, 33 we need to redefine all of these. */ 34 35 #undef TCGv 36 #undef tcg_temp_new 37 #undef tcg_global_mem_new 38 39 #if TARGET_LONG_BITS == 64 40 #define TCGv_tl TCGv_i64 41 #define tcg_temp_new_tl tcg_temp_new_i64 42 #if TARGET_REGISTER_BITS == 64 43 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64 44 #else 45 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64 46 #endif 47 #else 48 #define TCGv_tl TCGv_i32 49 #define tcg_temp_new_tl tcg_temp_new_i32 50 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32 51 #endif 52 53 #if TARGET_REGISTER_BITS == 64 54 #define TCGv_reg TCGv_i64 55 56 #define tcg_temp_new tcg_temp_new_i64 57 #define tcg_global_mem_new tcg_global_mem_new_i64 58 59 #define tcg_gen_movi_reg tcg_gen_movi_i64 60 #define tcg_gen_mov_reg tcg_gen_mov_i64 61 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64 62 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64 63 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64 64 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64 65 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64 66 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64 67 #define tcg_gen_ld_reg tcg_gen_ld_i64 68 #define tcg_gen_st8_reg tcg_gen_st8_i64 69 #define tcg_gen_st16_reg tcg_gen_st16_i64 70 #define tcg_gen_st32_reg tcg_gen_st32_i64 71 #define tcg_gen_st_reg tcg_gen_st_i64 72 #define tcg_gen_add_reg tcg_gen_add_i64 73 #define tcg_gen_addi_reg tcg_gen_addi_i64 74 #define tcg_gen_sub_reg tcg_gen_sub_i64 75 #define tcg_gen_neg_reg tcg_gen_neg_i64 76 #define tcg_gen_subfi_reg tcg_gen_subfi_i64 77 #define tcg_gen_subi_reg tcg_gen_subi_i64 78 #define tcg_gen_and_reg tcg_gen_and_i64 79 #define tcg_gen_andi_reg tcg_gen_andi_i64 80 #define tcg_gen_or_reg tcg_gen_or_i64 81 #define tcg_gen_ori_reg tcg_gen_ori_i64 82 #define tcg_gen_xor_reg tcg_gen_xor_i64 83 #define tcg_gen_xori_reg tcg_gen_xori_i64 84 #define tcg_gen_not_reg tcg_gen_not_i64 85 #define tcg_gen_shl_reg tcg_gen_shl_i64 86 #define tcg_gen_shli_reg tcg_gen_shli_i64 87 #define tcg_gen_shr_reg tcg_gen_shr_i64 88 #define tcg_gen_shri_reg tcg_gen_shri_i64 89 #define tcg_gen_sar_reg tcg_gen_sar_i64 90 #define tcg_gen_sari_reg tcg_gen_sari_i64 91 #define tcg_gen_brcond_reg tcg_gen_brcond_i64 92 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64 93 #define tcg_gen_setcond_reg tcg_gen_setcond_i64 94 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64 95 #define tcg_gen_mul_reg tcg_gen_mul_i64 96 #define tcg_gen_muli_reg tcg_gen_muli_i64 97 #define tcg_gen_div_reg tcg_gen_div_i64 98 #define tcg_gen_rem_reg tcg_gen_rem_i64 99 #define tcg_gen_divu_reg tcg_gen_divu_i64 100 #define tcg_gen_remu_reg tcg_gen_remu_i64 101 #define tcg_gen_discard_reg tcg_gen_discard_i64 102 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32 103 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64 104 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64 105 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64 106 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64 107 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64 108 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64 109 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64 110 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64 111 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64 112 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64 113 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64 114 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64 115 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64 116 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64 117 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64 118 #define tcg_gen_andc_reg tcg_gen_andc_i64 119 #define tcg_gen_eqv_reg tcg_gen_eqv_i64 120 #define tcg_gen_nand_reg tcg_gen_nand_i64 121 #define tcg_gen_nor_reg tcg_gen_nor_i64 122 #define tcg_gen_orc_reg tcg_gen_orc_i64 123 #define tcg_gen_clz_reg tcg_gen_clz_i64 124 #define tcg_gen_ctz_reg tcg_gen_ctz_i64 125 #define tcg_gen_clzi_reg tcg_gen_clzi_i64 126 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64 127 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64 128 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64 129 #define tcg_gen_rotl_reg tcg_gen_rotl_i64 130 #define tcg_gen_rotli_reg tcg_gen_rotli_i64 131 #define tcg_gen_rotr_reg tcg_gen_rotr_i64 132 #define tcg_gen_rotri_reg tcg_gen_rotri_i64 133 #define tcg_gen_deposit_reg tcg_gen_deposit_i64 134 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64 135 #define tcg_gen_extract_reg tcg_gen_extract_i64 136 #define tcg_gen_sextract_reg tcg_gen_sextract_i64 137 #define tcg_gen_extract2_reg tcg_gen_extract2_i64 138 #define tcg_const_reg tcg_const_i64 139 #define tcg_const_local_reg tcg_const_local_i64 140 #define tcg_constant_reg tcg_constant_i64 141 #define tcg_gen_movcond_reg tcg_gen_movcond_i64 142 #define tcg_gen_add2_reg tcg_gen_add2_i64 143 #define tcg_gen_sub2_reg tcg_gen_sub2_i64 144 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64 145 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64 146 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64 147 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr 148 #else 149 #define TCGv_reg TCGv_i32 150 #define tcg_temp_new tcg_temp_new_i32 151 #define tcg_global_mem_new tcg_global_mem_new_i32 152 153 #define tcg_gen_movi_reg tcg_gen_movi_i32 154 #define tcg_gen_mov_reg tcg_gen_mov_i32 155 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32 156 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32 157 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32 158 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32 159 #define tcg_gen_ld32u_reg tcg_gen_ld_i32 160 #define tcg_gen_ld32s_reg tcg_gen_ld_i32 161 #define tcg_gen_ld_reg tcg_gen_ld_i32 162 #define tcg_gen_st8_reg tcg_gen_st8_i32 163 #define tcg_gen_st16_reg tcg_gen_st16_i32 164 #define tcg_gen_st32_reg tcg_gen_st32_i32 165 #define tcg_gen_st_reg tcg_gen_st_i32 166 #define tcg_gen_add_reg tcg_gen_add_i32 167 #define tcg_gen_addi_reg tcg_gen_addi_i32 168 #define tcg_gen_sub_reg tcg_gen_sub_i32 169 #define tcg_gen_neg_reg tcg_gen_neg_i32 170 #define tcg_gen_subfi_reg tcg_gen_subfi_i32 171 #define tcg_gen_subi_reg tcg_gen_subi_i32 172 #define tcg_gen_and_reg tcg_gen_and_i32 173 #define tcg_gen_andi_reg tcg_gen_andi_i32 174 #define tcg_gen_or_reg tcg_gen_or_i32 175 #define tcg_gen_ori_reg tcg_gen_ori_i32 176 #define tcg_gen_xor_reg tcg_gen_xor_i32 177 #define tcg_gen_xori_reg tcg_gen_xori_i32 178 #define tcg_gen_not_reg tcg_gen_not_i32 179 #define tcg_gen_shl_reg tcg_gen_shl_i32 180 #define tcg_gen_shli_reg tcg_gen_shli_i32 181 #define tcg_gen_shr_reg tcg_gen_shr_i32 182 #define tcg_gen_shri_reg tcg_gen_shri_i32 183 #define tcg_gen_sar_reg tcg_gen_sar_i32 184 #define tcg_gen_sari_reg tcg_gen_sari_i32 185 #define tcg_gen_brcond_reg tcg_gen_brcond_i32 186 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32 187 #define tcg_gen_setcond_reg tcg_gen_setcond_i32 188 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32 189 #define tcg_gen_mul_reg tcg_gen_mul_i32 190 #define tcg_gen_muli_reg tcg_gen_muli_i32 191 #define tcg_gen_div_reg tcg_gen_div_i32 192 #define tcg_gen_rem_reg tcg_gen_rem_i32 193 #define tcg_gen_divu_reg tcg_gen_divu_i32 194 #define tcg_gen_remu_reg tcg_gen_remu_i32 195 #define tcg_gen_discard_reg tcg_gen_discard_i32 196 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32 197 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32 198 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32 199 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32 200 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64 201 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64 202 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32 203 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32 204 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32 205 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32 206 #define tcg_gen_ext32u_reg tcg_gen_mov_i32 207 #define tcg_gen_ext32s_reg tcg_gen_mov_i32 208 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32 209 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32 210 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64 211 #define tcg_gen_andc_reg tcg_gen_andc_i32 212 #define tcg_gen_eqv_reg tcg_gen_eqv_i32 213 #define tcg_gen_nand_reg tcg_gen_nand_i32 214 #define tcg_gen_nor_reg tcg_gen_nor_i32 215 #define tcg_gen_orc_reg tcg_gen_orc_i32 216 #define tcg_gen_clz_reg tcg_gen_clz_i32 217 #define tcg_gen_ctz_reg tcg_gen_ctz_i32 218 #define tcg_gen_clzi_reg tcg_gen_clzi_i32 219 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32 220 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32 221 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32 222 #define tcg_gen_rotl_reg tcg_gen_rotl_i32 223 #define tcg_gen_rotli_reg tcg_gen_rotli_i32 224 #define tcg_gen_rotr_reg tcg_gen_rotr_i32 225 #define tcg_gen_rotri_reg tcg_gen_rotri_i32 226 #define tcg_gen_deposit_reg tcg_gen_deposit_i32 227 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32 228 #define tcg_gen_extract_reg tcg_gen_extract_i32 229 #define tcg_gen_sextract_reg tcg_gen_sextract_i32 230 #define tcg_gen_extract2_reg tcg_gen_extract2_i32 231 #define tcg_const_reg tcg_const_i32 232 #define tcg_const_local_reg tcg_const_local_i32 233 #define tcg_constant_reg tcg_constant_i32 234 #define tcg_gen_movcond_reg tcg_gen_movcond_i32 235 #define tcg_gen_add2_reg tcg_gen_add2_i32 236 #define tcg_gen_sub2_reg tcg_gen_sub2_i32 237 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32 238 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32 239 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32 240 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr 241 #endif /* TARGET_REGISTER_BITS */ 242 243 typedef struct DisasCond { 244 TCGCond c; 245 TCGv_reg a0, a1; 246 } DisasCond; 247 248 typedef struct DisasContext { 249 DisasContextBase base; 250 CPUState *cs; 251 252 target_ureg iaoq_f; 253 target_ureg iaoq_b; 254 target_ureg iaoq_n; 255 TCGv_reg iaoq_n_var; 256 257 int ntempr, ntempl; 258 TCGv_reg tempr[8]; 259 TCGv_tl templ[4]; 260 261 DisasCond null_cond; 262 TCGLabel *null_lab; 263 264 uint32_t insn; 265 uint32_t tb_flags; 266 int mmu_idx; 267 int privilege; 268 bool psw_n_nonzero; 269 270 #ifdef CONFIG_USER_ONLY 271 MemOp unalign; 272 #endif 273 } DisasContext; 274 275 #ifdef CONFIG_USER_ONLY 276 #define UNALIGN(C) (C)->unalign 277 #else 278 #define UNALIGN(C) 0 279 #endif 280 281 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */ 282 static int expand_sm_imm(DisasContext *ctx, int val) 283 { 284 if (val & PSW_SM_E) { 285 val = (val & ~PSW_SM_E) | PSW_E; 286 } 287 if (val & PSW_SM_W) { 288 val = (val & ~PSW_SM_W) | PSW_W; 289 } 290 return val; 291 } 292 293 /* Inverted space register indicates 0 means sr0 not inferred from base. */ 294 static int expand_sr3x(DisasContext *ctx, int val) 295 { 296 return ~val; 297 } 298 299 /* Convert the M:A bits within a memory insn to the tri-state value 300 we use for the final M. */ 301 static int ma_to_m(DisasContext *ctx, int val) 302 { 303 return val & 2 ? (val & 1 ? -1 : 1) : 0; 304 } 305 306 /* Convert the sign of the displacement to a pre or post-modify. */ 307 static int pos_to_m(DisasContext *ctx, int val) 308 { 309 return val ? 1 : -1; 310 } 311 312 static int neg_to_m(DisasContext *ctx, int val) 313 { 314 return val ? -1 : 1; 315 } 316 317 /* Used for branch targets and fp memory ops. */ 318 static int expand_shl2(DisasContext *ctx, int val) 319 { 320 return val << 2; 321 } 322 323 /* Used for fp memory ops. */ 324 static int expand_shl3(DisasContext *ctx, int val) 325 { 326 return val << 3; 327 } 328 329 /* Used for assemble_21. */ 330 static int expand_shl11(DisasContext *ctx, int val) 331 { 332 return val << 11; 333 } 334 335 336 /* Include the auto-generated decoder. */ 337 #include "decode-insns.c.inc" 338 339 /* We are not using a goto_tb (for whatever reason), but have updated 340 the iaq (for whatever reason), so don't do it again on exit. */ 341 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0 342 343 /* We are exiting the TB, but have neither emitted a goto_tb, nor 344 updated the iaq for the next instruction to be executed. */ 345 #define DISAS_IAQ_N_STALE DISAS_TARGET_1 346 347 /* Similarly, but we want to return to the main loop immediately 348 to recognize unmasked interrupts. */ 349 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2 350 #define DISAS_EXIT DISAS_TARGET_3 351 352 /* global register indexes */ 353 static TCGv_reg cpu_gr[32]; 354 static TCGv_i64 cpu_sr[4]; 355 static TCGv_i64 cpu_srH; 356 static TCGv_reg cpu_iaoq_f; 357 static TCGv_reg cpu_iaoq_b; 358 static TCGv_i64 cpu_iasq_f; 359 static TCGv_i64 cpu_iasq_b; 360 static TCGv_reg cpu_sar; 361 static TCGv_reg cpu_psw_n; 362 static TCGv_reg cpu_psw_v; 363 static TCGv_reg cpu_psw_cb; 364 static TCGv_reg cpu_psw_cb_msb; 365 366 #include "exec/gen-icount.h" 367 368 void hppa_translate_init(void) 369 { 370 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) } 371 372 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar; 373 static const GlobalVar vars[] = { 374 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) }, 375 DEF_VAR(psw_n), 376 DEF_VAR(psw_v), 377 DEF_VAR(psw_cb), 378 DEF_VAR(psw_cb_msb), 379 DEF_VAR(iaoq_f), 380 DEF_VAR(iaoq_b), 381 }; 382 383 #undef DEF_VAR 384 385 /* Use the symbolic register names that match the disassembler. */ 386 static const char gr_names[32][4] = { 387 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 388 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 389 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 390 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" 391 }; 392 /* SR[4-7] are not global registers so that we can index them. */ 393 static const char sr_names[5][4] = { 394 "sr0", "sr1", "sr2", "sr3", "srH" 395 }; 396 397 int i; 398 399 cpu_gr[0] = NULL; 400 for (i = 1; i < 32; i++) { 401 cpu_gr[i] = tcg_global_mem_new(cpu_env, 402 offsetof(CPUHPPAState, gr[i]), 403 gr_names[i]); 404 } 405 for (i = 0; i < 4; i++) { 406 cpu_sr[i] = tcg_global_mem_new_i64(cpu_env, 407 offsetof(CPUHPPAState, sr[i]), 408 sr_names[i]); 409 } 410 cpu_srH = tcg_global_mem_new_i64(cpu_env, 411 offsetof(CPUHPPAState, sr[4]), 412 sr_names[4]); 413 414 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 415 const GlobalVar *v = &vars[i]; 416 *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name); 417 } 418 419 cpu_iasq_f = tcg_global_mem_new_i64(cpu_env, 420 offsetof(CPUHPPAState, iasq_f), 421 "iasq_f"); 422 cpu_iasq_b = tcg_global_mem_new_i64(cpu_env, 423 offsetof(CPUHPPAState, iasq_b), 424 "iasq_b"); 425 } 426 427 static DisasCond cond_make_f(void) 428 { 429 return (DisasCond){ 430 .c = TCG_COND_NEVER, 431 .a0 = NULL, 432 .a1 = NULL, 433 }; 434 } 435 436 static DisasCond cond_make_t(void) 437 { 438 return (DisasCond){ 439 .c = TCG_COND_ALWAYS, 440 .a0 = NULL, 441 .a1 = NULL, 442 }; 443 } 444 445 static DisasCond cond_make_n(void) 446 { 447 return (DisasCond){ 448 .c = TCG_COND_NE, 449 .a0 = cpu_psw_n, 450 .a1 = tcg_constant_reg(0) 451 }; 452 } 453 454 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0) 455 { 456 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS); 457 return (DisasCond){ 458 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0) 459 }; 460 } 461 462 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0) 463 { 464 TCGv_reg tmp = tcg_temp_new(); 465 tcg_gen_mov_reg(tmp, a0); 466 return cond_make_0_tmp(c, tmp); 467 } 468 469 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1) 470 { 471 DisasCond r = { .c = c }; 472 473 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS); 474 r.a0 = tcg_temp_new(); 475 tcg_gen_mov_reg(r.a0, a0); 476 r.a1 = tcg_temp_new(); 477 tcg_gen_mov_reg(r.a1, a1); 478 479 return r; 480 } 481 482 static void cond_free(DisasCond *cond) 483 { 484 switch (cond->c) { 485 default: 486 cond->a0 = NULL; 487 cond->a1 = NULL; 488 /* fallthru */ 489 case TCG_COND_ALWAYS: 490 cond->c = TCG_COND_NEVER; 491 break; 492 case TCG_COND_NEVER: 493 break; 494 } 495 } 496 497 static TCGv_reg get_temp(DisasContext *ctx) 498 { 499 unsigned i = ctx->ntempr++; 500 g_assert(i < ARRAY_SIZE(ctx->tempr)); 501 return ctx->tempr[i] = tcg_temp_new(); 502 } 503 504 #ifndef CONFIG_USER_ONLY 505 static TCGv_tl get_temp_tl(DisasContext *ctx) 506 { 507 unsigned i = ctx->ntempl++; 508 g_assert(i < ARRAY_SIZE(ctx->templ)); 509 return ctx->templ[i] = tcg_temp_new_tl(); 510 } 511 #endif 512 513 static TCGv_reg load_const(DisasContext *ctx, target_sreg v) 514 { 515 TCGv_reg t = get_temp(ctx); 516 tcg_gen_movi_reg(t, v); 517 return t; 518 } 519 520 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg) 521 { 522 if (reg == 0) { 523 TCGv_reg t = get_temp(ctx); 524 tcg_gen_movi_reg(t, 0); 525 return t; 526 } else { 527 return cpu_gr[reg]; 528 } 529 } 530 531 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg) 532 { 533 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) { 534 return get_temp(ctx); 535 } else { 536 return cpu_gr[reg]; 537 } 538 } 539 540 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t) 541 { 542 if (ctx->null_cond.c != TCG_COND_NEVER) { 543 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0, 544 ctx->null_cond.a1, dest, t); 545 } else { 546 tcg_gen_mov_reg(dest, t); 547 } 548 } 549 550 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t) 551 { 552 if (reg != 0) { 553 save_or_nullify(ctx, cpu_gr[reg], t); 554 } 555 } 556 557 #if HOST_BIG_ENDIAN 558 # define HI_OFS 0 559 # define LO_OFS 4 560 #else 561 # define HI_OFS 4 562 # define LO_OFS 0 563 #endif 564 565 static TCGv_i32 load_frw_i32(unsigned rt) 566 { 567 TCGv_i32 ret = tcg_temp_new_i32(); 568 tcg_gen_ld_i32(ret, cpu_env, 569 offsetof(CPUHPPAState, fr[rt & 31]) 570 + (rt & 32 ? LO_OFS : HI_OFS)); 571 return ret; 572 } 573 574 static TCGv_i32 load_frw0_i32(unsigned rt) 575 { 576 if (rt == 0) { 577 return tcg_const_i32(0); 578 } else { 579 return load_frw_i32(rt); 580 } 581 } 582 583 static TCGv_i64 load_frw0_i64(unsigned rt) 584 { 585 if (rt == 0) { 586 return tcg_const_i64(0); 587 } else { 588 TCGv_i64 ret = tcg_temp_new_i64(); 589 tcg_gen_ld32u_i64(ret, cpu_env, 590 offsetof(CPUHPPAState, fr[rt & 31]) 591 + (rt & 32 ? LO_OFS : HI_OFS)); 592 return ret; 593 } 594 } 595 596 static void save_frw_i32(unsigned rt, TCGv_i32 val) 597 { 598 tcg_gen_st_i32(val, cpu_env, 599 offsetof(CPUHPPAState, fr[rt & 31]) 600 + (rt & 32 ? LO_OFS : HI_OFS)); 601 } 602 603 #undef HI_OFS 604 #undef LO_OFS 605 606 static TCGv_i64 load_frd(unsigned rt) 607 { 608 TCGv_i64 ret = tcg_temp_new_i64(); 609 tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt])); 610 return ret; 611 } 612 613 static TCGv_i64 load_frd0(unsigned rt) 614 { 615 if (rt == 0) { 616 return tcg_const_i64(0); 617 } else { 618 return load_frd(rt); 619 } 620 } 621 622 static void save_frd(unsigned rt, TCGv_i64 val) 623 { 624 tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt])); 625 } 626 627 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg) 628 { 629 #ifdef CONFIG_USER_ONLY 630 tcg_gen_movi_i64(dest, 0); 631 #else 632 if (reg < 4) { 633 tcg_gen_mov_i64(dest, cpu_sr[reg]); 634 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) { 635 tcg_gen_mov_i64(dest, cpu_srH); 636 } else { 637 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg])); 638 } 639 #endif 640 } 641 642 /* Skip over the implementation of an insn that has been nullified. 643 Use this when the insn is too complex for a conditional move. */ 644 static void nullify_over(DisasContext *ctx) 645 { 646 if (ctx->null_cond.c != TCG_COND_NEVER) { 647 /* The always condition should have been handled in the main loop. */ 648 assert(ctx->null_cond.c != TCG_COND_ALWAYS); 649 650 ctx->null_lab = gen_new_label(); 651 652 /* If we're using PSW[N], copy it to a temp because... */ 653 if (ctx->null_cond.a0 == cpu_psw_n) { 654 ctx->null_cond.a0 = tcg_temp_new(); 655 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n); 656 } 657 /* ... we clear it before branching over the implementation, 658 so that (1) it's clear after nullifying this insn and 659 (2) if this insn nullifies the next, PSW[N] is valid. */ 660 if (ctx->psw_n_nonzero) { 661 ctx->psw_n_nonzero = false; 662 tcg_gen_movi_reg(cpu_psw_n, 0); 663 } 664 665 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0, 666 ctx->null_cond.a1, ctx->null_lab); 667 cond_free(&ctx->null_cond); 668 } 669 } 670 671 /* Save the current nullification state to PSW[N]. */ 672 static void nullify_save(DisasContext *ctx) 673 { 674 if (ctx->null_cond.c == TCG_COND_NEVER) { 675 if (ctx->psw_n_nonzero) { 676 tcg_gen_movi_reg(cpu_psw_n, 0); 677 } 678 return; 679 } 680 if (ctx->null_cond.a0 != cpu_psw_n) { 681 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n, 682 ctx->null_cond.a0, ctx->null_cond.a1); 683 ctx->psw_n_nonzero = true; 684 } 685 cond_free(&ctx->null_cond); 686 } 687 688 /* Set a PSW[N] to X. The intention is that this is used immediately 689 before a goto_tb/exit_tb, so that there is no fallthru path to other 690 code within the TB. Therefore we do not update psw_n_nonzero. */ 691 static void nullify_set(DisasContext *ctx, bool x) 692 { 693 if (ctx->psw_n_nonzero || x) { 694 tcg_gen_movi_reg(cpu_psw_n, x); 695 } 696 } 697 698 /* Mark the end of an instruction that may have been nullified. 699 This is the pair to nullify_over. Always returns true so that 700 it may be tail-called from a translate function. */ 701 static bool nullify_end(DisasContext *ctx) 702 { 703 TCGLabel *null_lab = ctx->null_lab; 704 DisasJumpType status = ctx->base.is_jmp; 705 706 /* For NEXT, NORETURN, STALE, we can easily continue (or exit). 707 For UPDATED, we cannot update on the nullified path. */ 708 assert(status != DISAS_IAQ_N_UPDATED); 709 710 if (likely(null_lab == NULL)) { 711 /* The current insn wasn't conditional or handled the condition 712 applied to it without a branch, so the (new) setting of 713 NULL_COND can be applied directly to the next insn. */ 714 return true; 715 } 716 ctx->null_lab = NULL; 717 718 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) { 719 /* The next instruction will be unconditional, 720 and NULL_COND already reflects that. */ 721 gen_set_label(null_lab); 722 } else { 723 /* The insn that we just executed is itself nullifying the next 724 instruction. Store the condition in the PSW[N] global. 725 We asserted PSW[N] = 0 in nullify_over, so that after the 726 label we have the proper value in place. */ 727 nullify_save(ctx); 728 gen_set_label(null_lab); 729 ctx->null_cond = cond_make_n(); 730 } 731 if (status == DISAS_NORETURN) { 732 ctx->base.is_jmp = DISAS_NEXT; 733 } 734 return true; 735 } 736 737 static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval) 738 { 739 if (unlikely(ival == -1)) { 740 tcg_gen_mov_reg(dest, vval); 741 } else { 742 tcg_gen_movi_reg(dest, ival); 743 } 744 } 745 746 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp) 747 { 748 return ctx->iaoq_f + disp + 8; 749 } 750 751 static void gen_excp_1(int exception) 752 { 753 gen_helper_excp(cpu_env, tcg_constant_i32(exception)); 754 } 755 756 static void gen_excp(DisasContext *ctx, int exception) 757 { 758 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); 759 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); 760 nullify_save(ctx); 761 gen_excp_1(exception); 762 ctx->base.is_jmp = DISAS_NORETURN; 763 } 764 765 static bool gen_excp_iir(DisasContext *ctx, int exc) 766 { 767 nullify_over(ctx); 768 tcg_gen_st_reg(tcg_constant_reg(ctx->insn), 769 cpu_env, offsetof(CPUHPPAState, cr[CR_IIR])); 770 gen_excp(ctx, exc); 771 return nullify_end(ctx); 772 } 773 774 static bool gen_illegal(DisasContext *ctx) 775 { 776 return gen_excp_iir(ctx, EXCP_ILL); 777 } 778 779 #ifdef CONFIG_USER_ONLY 780 #define CHECK_MOST_PRIVILEGED(EXCP) \ 781 return gen_excp_iir(ctx, EXCP) 782 #else 783 #define CHECK_MOST_PRIVILEGED(EXCP) \ 784 do { \ 785 if (ctx->privilege != 0) { \ 786 return gen_excp_iir(ctx, EXCP); \ 787 } \ 788 } while (0) 789 #endif 790 791 static bool use_goto_tb(DisasContext *ctx, target_ureg dest) 792 { 793 return translator_use_goto_tb(&ctx->base, dest); 794 } 795 796 /* If the next insn is to be nullified, and it's on the same page, 797 and we're not attempting to set a breakpoint on it, then we can 798 totally skip the nullified insn. This avoids creating and 799 executing a TB that merely branches to the next TB. */ 800 static bool use_nullify_skip(DisasContext *ctx) 801 { 802 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0 803 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY)); 804 } 805 806 static void gen_goto_tb(DisasContext *ctx, int which, 807 target_ureg f, target_ureg b) 808 { 809 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) { 810 tcg_gen_goto_tb(which); 811 tcg_gen_movi_reg(cpu_iaoq_f, f); 812 tcg_gen_movi_reg(cpu_iaoq_b, b); 813 tcg_gen_exit_tb(ctx->base.tb, which); 814 } else { 815 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b); 816 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var); 817 tcg_gen_lookup_and_goto_ptr(); 818 } 819 } 820 821 static bool cond_need_sv(int c) 822 { 823 return c == 2 || c == 3 || c == 6; 824 } 825 826 static bool cond_need_cb(int c) 827 { 828 return c == 4 || c == 5; 829 } 830 831 /* 832 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of 833 * the Parisc 1.1 Architecture Reference Manual for details. 834 */ 835 836 static DisasCond do_cond(unsigned cf, TCGv_reg res, 837 TCGv_reg cb_msb, TCGv_reg sv) 838 { 839 DisasCond cond; 840 TCGv_reg tmp; 841 842 switch (cf >> 1) { 843 case 0: /* Never / TR (0 / 1) */ 844 cond = cond_make_f(); 845 break; 846 case 1: /* = / <> (Z / !Z) */ 847 cond = cond_make_0(TCG_COND_EQ, res); 848 break; 849 case 2: /* < / >= (N ^ V / !(N ^ V) */ 850 tmp = tcg_temp_new(); 851 tcg_gen_xor_reg(tmp, res, sv); 852 cond = cond_make_0_tmp(TCG_COND_LT, tmp); 853 break; 854 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */ 855 /* 856 * Simplify: 857 * (N ^ V) | Z 858 * ((res < 0) ^ (sv < 0)) | !res 859 * ((res ^ sv) < 0) | !res 860 * (~(res ^ sv) >= 0) | !res 861 * !(~(res ^ sv) >> 31) | !res 862 * !(~(res ^ sv) >> 31 & res) 863 */ 864 tmp = tcg_temp_new(); 865 tcg_gen_eqv_reg(tmp, res, sv); 866 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); 867 tcg_gen_and_reg(tmp, tmp, res); 868 cond = cond_make_0_tmp(TCG_COND_EQ, tmp); 869 break; 870 case 4: /* NUV / UV (!C / C) */ 871 cond = cond_make_0(TCG_COND_EQ, cb_msb); 872 break; 873 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */ 874 tmp = tcg_temp_new(); 875 tcg_gen_neg_reg(tmp, cb_msb); 876 tcg_gen_and_reg(tmp, tmp, res); 877 cond = cond_make_0_tmp(TCG_COND_EQ, tmp); 878 break; 879 case 6: /* SV / NSV (V / !V) */ 880 cond = cond_make_0(TCG_COND_LT, sv); 881 break; 882 case 7: /* OD / EV */ 883 tmp = tcg_temp_new(); 884 tcg_gen_andi_reg(tmp, res, 1); 885 cond = cond_make_0_tmp(TCG_COND_NE, tmp); 886 break; 887 default: 888 g_assert_not_reached(); 889 } 890 if (cf & 1) { 891 cond.c = tcg_invert_cond(cond.c); 892 } 893 894 return cond; 895 } 896 897 /* Similar, but for the special case of subtraction without borrow, we 898 can use the inputs directly. This can allow other computation to be 899 deleted as unused. */ 900 901 static DisasCond do_sub_cond(unsigned cf, TCGv_reg res, 902 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv) 903 { 904 DisasCond cond; 905 906 switch (cf >> 1) { 907 case 1: /* = / <> */ 908 cond = cond_make(TCG_COND_EQ, in1, in2); 909 break; 910 case 2: /* < / >= */ 911 cond = cond_make(TCG_COND_LT, in1, in2); 912 break; 913 case 3: /* <= / > */ 914 cond = cond_make(TCG_COND_LE, in1, in2); 915 break; 916 case 4: /* << / >>= */ 917 cond = cond_make(TCG_COND_LTU, in1, in2); 918 break; 919 case 5: /* <<= / >> */ 920 cond = cond_make(TCG_COND_LEU, in1, in2); 921 break; 922 default: 923 return do_cond(cf, res, NULL, sv); 924 } 925 if (cf & 1) { 926 cond.c = tcg_invert_cond(cond.c); 927 } 928 929 return cond; 930 } 931 932 /* 933 * Similar, but for logicals, where the carry and overflow bits are not 934 * computed, and use of them is undefined. 935 * 936 * Undefined or not, hardware does not trap. It seems reasonable to 937 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's 938 * how cases c={2,3} are treated. 939 */ 940 941 static DisasCond do_log_cond(unsigned cf, TCGv_reg res) 942 { 943 switch (cf) { 944 case 0: /* never */ 945 case 9: /* undef, C */ 946 case 11: /* undef, C & !Z */ 947 case 12: /* undef, V */ 948 return cond_make_f(); 949 950 case 1: /* true */ 951 case 8: /* undef, !C */ 952 case 10: /* undef, !C | Z */ 953 case 13: /* undef, !V */ 954 return cond_make_t(); 955 956 case 2: /* == */ 957 return cond_make_0(TCG_COND_EQ, res); 958 case 3: /* <> */ 959 return cond_make_0(TCG_COND_NE, res); 960 case 4: /* < */ 961 return cond_make_0(TCG_COND_LT, res); 962 case 5: /* >= */ 963 return cond_make_0(TCG_COND_GE, res); 964 case 6: /* <= */ 965 return cond_make_0(TCG_COND_LE, res); 966 case 7: /* > */ 967 return cond_make_0(TCG_COND_GT, res); 968 969 case 14: /* OD */ 970 case 15: /* EV */ 971 return do_cond(cf, res, NULL, NULL); 972 973 default: 974 g_assert_not_reached(); 975 } 976 } 977 978 /* Similar, but for shift/extract/deposit conditions. */ 979 980 static DisasCond do_sed_cond(unsigned orig, TCGv_reg res) 981 { 982 unsigned c, f; 983 984 /* Convert the compressed condition codes to standard. 985 0-2 are the same as logicals (nv,<,<=), while 3 is OD. 986 4-7 are the reverse of 0-3. */ 987 c = orig & 3; 988 if (c == 3) { 989 c = 7; 990 } 991 f = (orig & 4) / 4; 992 993 return do_log_cond(c * 2 + f, res); 994 } 995 996 /* Similar, but for unit conditions. */ 997 998 static DisasCond do_unit_cond(unsigned cf, TCGv_reg res, 999 TCGv_reg in1, TCGv_reg in2) 1000 { 1001 DisasCond cond; 1002 TCGv_reg tmp, cb = NULL; 1003 1004 if (cf & 8) { 1005 /* Since we want to test lots of carry-out bits all at once, do not 1006 * do our normal thing and compute carry-in of bit B+1 since that 1007 * leaves us with carry bits spread across two words. 1008 */ 1009 cb = tcg_temp_new(); 1010 tmp = tcg_temp_new(); 1011 tcg_gen_or_reg(cb, in1, in2); 1012 tcg_gen_and_reg(tmp, in1, in2); 1013 tcg_gen_andc_reg(cb, cb, res); 1014 tcg_gen_or_reg(cb, cb, tmp); 1015 } 1016 1017 switch (cf >> 1) { 1018 case 0: /* never / TR */ 1019 case 1: /* undefined */ 1020 case 5: /* undefined */ 1021 cond = cond_make_f(); 1022 break; 1023 1024 case 2: /* SBZ / NBZ */ 1025 /* See hasless(v,1) from 1026 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord 1027 */ 1028 tmp = tcg_temp_new(); 1029 tcg_gen_subi_reg(tmp, res, 0x01010101u); 1030 tcg_gen_andc_reg(tmp, tmp, res); 1031 tcg_gen_andi_reg(tmp, tmp, 0x80808080u); 1032 cond = cond_make_0(TCG_COND_NE, tmp); 1033 break; 1034 1035 case 3: /* SHZ / NHZ */ 1036 tmp = tcg_temp_new(); 1037 tcg_gen_subi_reg(tmp, res, 0x00010001u); 1038 tcg_gen_andc_reg(tmp, tmp, res); 1039 tcg_gen_andi_reg(tmp, tmp, 0x80008000u); 1040 cond = cond_make_0(TCG_COND_NE, tmp); 1041 break; 1042 1043 case 4: /* SDC / NDC */ 1044 tcg_gen_andi_reg(cb, cb, 0x88888888u); 1045 cond = cond_make_0(TCG_COND_NE, cb); 1046 break; 1047 1048 case 6: /* SBC / NBC */ 1049 tcg_gen_andi_reg(cb, cb, 0x80808080u); 1050 cond = cond_make_0(TCG_COND_NE, cb); 1051 break; 1052 1053 case 7: /* SHC / NHC */ 1054 tcg_gen_andi_reg(cb, cb, 0x80008000u); 1055 cond = cond_make_0(TCG_COND_NE, cb); 1056 break; 1057 1058 default: 1059 g_assert_not_reached(); 1060 } 1061 if (cf & 1) { 1062 cond.c = tcg_invert_cond(cond.c); 1063 } 1064 1065 return cond; 1066 } 1067 1068 /* Compute signed overflow for addition. */ 1069 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res, 1070 TCGv_reg in1, TCGv_reg in2) 1071 { 1072 TCGv_reg sv = get_temp(ctx); 1073 TCGv_reg tmp = tcg_temp_new(); 1074 1075 tcg_gen_xor_reg(sv, res, in1); 1076 tcg_gen_xor_reg(tmp, in1, in2); 1077 tcg_gen_andc_reg(sv, sv, tmp); 1078 1079 return sv; 1080 } 1081 1082 /* Compute signed overflow for subtraction. */ 1083 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res, 1084 TCGv_reg in1, TCGv_reg in2) 1085 { 1086 TCGv_reg sv = get_temp(ctx); 1087 TCGv_reg tmp = tcg_temp_new(); 1088 1089 tcg_gen_xor_reg(sv, res, in1); 1090 tcg_gen_xor_reg(tmp, in1, in2); 1091 tcg_gen_and_reg(sv, sv, tmp); 1092 1093 return sv; 1094 } 1095 1096 static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1097 TCGv_reg in2, unsigned shift, bool is_l, 1098 bool is_tsv, bool is_tc, bool is_c, unsigned cf) 1099 { 1100 TCGv_reg dest, cb, cb_msb, sv, tmp; 1101 unsigned c = cf >> 1; 1102 DisasCond cond; 1103 1104 dest = tcg_temp_new(); 1105 cb = NULL; 1106 cb_msb = NULL; 1107 1108 if (shift) { 1109 tmp = get_temp(ctx); 1110 tcg_gen_shli_reg(tmp, in1, shift); 1111 in1 = tmp; 1112 } 1113 1114 if (!is_l || cond_need_cb(c)) { 1115 TCGv_reg zero = tcg_constant_reg(0); 1116 cb_msb = get_temp(ctx); 1117 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero); 1118 if (is_c) { 1119 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero); 1120 } 1121 if (!is_l) { 1122 cb = get_temp(ctx); 1123 tcg_gen_xor_reg(cb, in1, in2); 1124 tcg_gen_xor_reg(cb, cb, dest); 1125 } 1126 } else { 1127 tcg_gen_add_reg(dest, in1, in2); 1128 if (is_c) { 1129 tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb); 1130 } 1131 } 1132 1133 /* Compute signed overflow if required. */ 1134 sv = NULL; 1135 if (is_tsv || cond_need_sv(c)) { 1136 sv = do_add_sv(ctx, dest, in1, in2); 1137 if (is_tsv) { 1138 /* ??? Need to include overflow from shift. */ 1139 gen_helper_tsv(cpu_env, sv); 1140 } 1141 } 1142 1143 /* Emit any conditional trap before any writeback. */ 1144 cond = do_cond(cf, dest, cb_msb, sv); 1145 if (is_tc) { 1146 tmp = tcg_temp_new(); 1147 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); 1148 gen_helper_tcond(cpu_env, tmp); 1149 } 1150 1151 /* Write back the result. */ 1152 if (!is_l) { 1153 save_or_nullify(ctx, cpu_psw_cb, cb); 1154 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); 1155 } 1156 save_gpr(ctx, rt, dest); 1157 1158 /* Install the new nullification. */ 1159 cond_free(&ctx->null_cond); 1160 ctx->null_cond = cond; 1161 } 1162 1163 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a, 1164 bool is_l, bool is_tsv, bool is_tc, bool is_c) 1165 { 1166 TCGv_reg tcg_r1, tcg_r2; 1167 1168 if (a->cf) { 1169 nullify_over(ctx); 1170 } 1171 tcg_r1 = load_gpr(ctx, a->r1); 1172 tcg_r2 = load_gpr(ctx, a->r2); 1173 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf); 1174 return nullify_end(ctx); 1175 } 1176 1177 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a, 1178 bool is_tsv, bool is_tc) 1179 { 1180 TCGv_reg tcg_im, tcg_r2; 1181 1182 if (a->cf) { 1183 nullify_over(ctx); 1184 } 1185 tcg_im = load_const(ctx, a->i); 1186 tcg_r2 = load_gpr(ctx, a->r); 1187 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf); 1188 return nullify_end(ctx); 1189 } 1190 1191 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1192 TCGv_reg in2, bool is_tsv, bool is_b, 1193 bool is_tc, unsigned cf) 1194 { 1195 TCGv_reg dest, sv, cb, cb_msb, zero, tmp; 1196 unsigned c = cf >> 1; 1197 DisasCond cond; 1198 1199 dest = tcg_temp_new(); 1200 cb = tcg_temp_new(); 1201 cb_msb = tcg_temp_new(); 1202 1203 zero = tcg_constant_reg(0); 1204 if (is_b) { 1205 /* DEST,C = IN1 + ~IN2 + C. */ 1206 tcg_gen_not_reg(cb, in2); 1207 tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero); 1208 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero); 1209 tcg_gen_xor_reg(cb, cb, in1); 1210 tcg_gen_xor_reg(cb, cb, dest); 1211 } else { 1212 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer 1213 operations by seeding the high word with 1 and subtracting. */ 1214 tcg_gen_movi_reg(cb_msb, 1); 1215 tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero); 1216 tcg_gen_eqv_reg(cb, in1, in2); 1217 tcg_gen_xor_reg(cb, cb, dest); 1218 } 1219 1220 /* Compute signed overflow if required. */ 1221 sv = NULL; 1222 if (is_tsv || cond_need_sv(c)) { 1223 sv = do_sub_sv(ctx, dest, in1, in2); 1224 if (is_tsv) { 1225 gen_helper_tsv(cpu_env, sv); 1226 } 1227 } 1228 1229 /* Compute the condition. We cannot use the special case for borrow. */ 1230 if (!is_b) { 1231 cond = do_sub_cond(cf, dest, in1, in2, sv); 1232 } else { 1233 cond = do_cond(cf, dest, cb_msb, sv); 1234 } 1235 1236 /* Emit any conditional trap before any writeback. */ 1237 if (is_tc) { 1238 tmp = tcg_temp_new(); 1239 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); 1240 gen_helper_tcond(cpu_env, tmp); 1241 } 1242 1243 /* Write back the result. */ 1244 save_or_nullify(ctx, cpu_psw_cb, cb); 1245 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); 1246 save_gpr(ctx, rt, dest); 1247 1248 /* Install the new nullification. */ 1249 cond_free(&ctx->null_cond); 1250 ctx->null_cond = cond; 1251 } 1252 1253 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a, 1254 bool is_tsv, bool is_b, bool is_tc) 1255 { 1256 TCGv_reg tcg_r1, tcg_r2; 1257 1258 if (a->cf) { 1259 nullify_over(ctx); 1260 } 1261 tcg_r1 = load_gpr(ctx, a->r1); 1262 tcg_r2 = load_gpr(ctx, a->r2); 1263 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf); 1264 return nullify_end(ctx); 1265 } 1266 1267 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv) 1268 { 1269 TCGv_reg tcg_im, tcg_r2; 1270 1271 if (a->cf) { 1272 nullify_over(ctx); 1273 } 1274 tcg_im = load_const(ctx, a->i); 1275 tcg_r2 = load_gpr(ctx, a->r); 1276 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf); 1277 return nullify_end(ctx); 1278 } 1279 1280 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1281 TCGv_reg in2, unsigned cf) 1282 { 1283 TCGv_reg dest, sv; 1284 DisasCond cond; 1285 1286 dest = tcg_temp_new(); 1287 tcg_gen_sub_reg(dest, in1, in2); 1288 1289 /* Compute signed overflow if required. */ 1290 sv = NULL; 1291 if (cond_need_sv(cf >> 1)) { 1292 sv = do_sub_sv(ctx, dest, in1, in2); 1293 } 1294 1295 /* Form the condition for the compare. */ 1296 cond = do_sub_cond(cf, dest, in1, in2, sv); 1297 1298 /* Clear. */ 1299 tcg_gen_movi_reg(dest, 0); 1300 save_gpr(ctx, rt, dest); 1301 1302 /* Install the new nullification. */ 1303 cond_free(&ctx->null_cond); 1304 ctx->null_cond = cond; 1305 } 1306 1307 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1308 TCGv_reg in2, unsigned cf, 1309 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) 1310 { 1311 TCGv_reg dest = dest_gpr(ctx, rt); 1312 1313 /* Perform the operation, and writeback. */ 1314 fn(dest, in1, in2); 1315 save_gpr(ctx, rt, dest); 1316 1317 /* Install the new nullification. */ 1318 cond_free(&ctx->null_cond); 1319 if (cf) { 1320 ctx->null_cond = do_log_cond(cf, dest); 1321 } 1322 } 1323 1324 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a, 1325 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) 1326 { 1327 TCGv_reg tcg_r1, tcg_r2; 1328 1329 if (a->cf) { 1330 nullify_over(ctx); 1331 } 1332 tcg_r1 = load_gpr(ctx, a->r1); 1333 tcg_r2 = load_gpr(ctx, a->r2); 1334 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn); 1335 return nullify_end(ctx); 1336 } 1337 1338 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1339 TCGv_reg in2, unsigned cf, bool is_tc, 1340 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) 1341 { 1342 TCGv_reg dest; 1343 DisasCond cond; 1344 1345 if (cf == 0) { 1346 dest = dest_gpr(ctx, rt); 1347 fn(dest, in1, in2); 1348 save_gpr(ctx, rt, dest); 1349 cond_free(&ctx->null_cond); 1350 } else { 1351 dest = tcg_temp_new(); 1352 fn(dest, in1, in2); 1353 1354 cond = do_unit_cond(cf, dest, in1, in2); 1355 1356 if (is_tc) { 1357 TCGv_reg tmp = tcg_temp_new(); 1358 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); 1359 gen_helper_tcond(cpu_env, tmp); 1360 } 1361 save_gpr(ctx, rt, dest); 1362 1363 cond_free(&ctx->null_cond); 1364 ctx->null_cond = cond; 1365 } 1366 } 1367 1368 #ifndef CONFIG_USER_ONLY 1369 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space 1370 from the top 2 bits of the base register. There are a few system 1371 instructions that have a 3-bit space specifier, for which SR0 is 1372 not special. To handle this, pass ~SP. */ 1373 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base) 1374 { 1375 TCGv_ptr ptr; 1376 TCGv_reg tmp; 1377 TCGv_i64 spc; 1378 1379 if (sp != 0) { 1380 if (sp < 0) { 1381 sp = ~sp; 1382 } 1383 spc = get_temp_tl(ctx); 1384 load_spr(ctx, spc, sp); 1385 return spc; 1386 } 1387 if (ctx->tb_flags & TB_FLAG_SR_SAME) { 1388 return cpu_srH; 1389 } 1390 1391 ptr = tcg_temp_new_ptr(); 1392 tmp = tcg_temp_new(); 1393 spc = get_temp_tl(ctx); 1394 1395 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5); 1396 tcg_gen_andi_reg(tmp, tmp, 030); 1397 tcg_gen_trunc_reg_ptr(ptr, tmp); 1398 1399 tcg_gen_add_ptr(ptr, ptr, cpu_env); 1400 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4])); 1401 1402 return spc; 1403 } 1404 #endif 1405 1406 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs, 1407 unsigned rb, unsigned rx, int scale, target_sreg disp, 1408 unsigned sp, int modify, bool is_phys) 1409 { 1410 TCGv_reg base = load_gpr(ctx, rb); 1411 TCGv_reg ofs; 1412 1413 /* Note that RX is mutually exclusive with DISP. */ 1414 if (rx) { 1415 ofs = get_temp(ctx); 1416 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale); 1417 tcg_gen_add_reg(ofs, ofs, base); 1418 } else if (disp || modify) { 1419 ofs = get_temp(ctx); 1420 tcg_gen_addi_reg(ofs, base, disp); 1421 } else { 1422 ofs = base; 1423 } 1424 1425 *pofs = ofs; 1426 #ifdef CONFIG_USER_ONLY 1427 *pgva = (modify <= 0 ? ofs : base); 1428 #else 1429 TCGv_tl addr = get_temp_tl(ctx); 1430 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base); 1431 if (ctx->tb_flags & PSW_W) { 1432 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull); 1433 } 1434 if (!is_phys) { 1435 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base)); 1436 } 1437 *pgva = addr; 1438 #endif 1439 } 1440 1441 /* Emit a memory load. The modify parameter should be 1442 * < 0 for pre-modify, 1443 * > 0 for post-modify, 1444 * = 0 for no base register update. 1445 */ 1446 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, 1447 unsigned rx, int scale, target_sreg disp, 1448 unsigned sp, int modify, MemOp mop) 1449 { 1450 TCGv_reg ofs; 1451 TCGv_tl addr; 1452 1453 /* Caller uses nullify_over/nullify_end. */ 1454 assert(ctx->null_cond.c == TCG_COND_NEVER); 1455 1456 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1457 ctx->mmu_idx == MMU_PHYS_IDX); 1458 tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1459 if (modify) { 1460 save_gpr(ctx, rb, ofs); 1461 } 1462 } 1463 1464 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, 1465 unsigned rx, int scale, target_sreg disp, 1466 unsigned sp, int modify, MemOp mop) 1467 { 1468 TCGv_reg ofs; 1469 TCGv_tl addr; 1470 1471 /* Caller uses nullify_over/nullify_end. */ 1472 assert(ctx->null_cond.c == TCG_COND_NEVER); 1473 1474 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1475 ctx->mmu_idx == MMU_PHYS_IDX); 1476 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1477 if (modify) { 1478 save_gpr(ctx, rb, ofs); 1479 } 1480 } 1481 1482 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, 1483 unsigned rx, int scale, target_sreg disp, 1484 unsigned sp, int modify, MemOp mop) 1485 { 1486 TCGv_reg ofs; 1487 TCGv_tl addr; 1488 1489 /* Caller uses nullify_over/nullify_end. */ 1490 assert(ctx->null_cond.c == TCG_COND_NEVER); 1491 1492 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1493 ctx->mmu_idx == MMU_PHYS_IDX); 1494 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1495 if (modify) { 1496 save_gpr(ctx, rb, ofs); 1497 } 1498 } 1499 1500 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, 1501 unsigned rx, int scale, target_sreg disp, 1502 unsigned sp, int modify, MemOp mop) 1503 { 1504 TCGv_reg ofs; 1505 TCGv_tl addr; 1506 1507 /* Caller uses nullify_over/nullify_end. */ 1508 assert(ctx->null_cond.c == TCG_COND_NEVER); 1509 1510 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1511 ctx->mmu_idx == MMU_PHYS_IDX); 1512 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1513 if (modify) { 1514 save_gpr(ctx, rb, ofs); 1515 } 1516 } 1517 1518 #if TARGET_REGISTER_BITS == 64 1519 #define do_load_reg do_load_64 1520 #define do_store_reg do_store_64 1521 #else 1522 #define do_load_reg do_load_32 1523 #define do_store_reg do_store_32 1524 #endif 1525 1526 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb, 1527 unsigned rx, int scale, target_sreg disp, 1528 unsigned sp, int modify, MemOp mop) 1529 { 1530 TCGv_reg dest; 1531 1532 nullify_over(ctx); 1533 1534 if (modify == 0) { 1535 /* No base register update. */ 1536 dest = dest_gpr(ctx, rt); 1537 } else { 1538 /* Make sure if RT == RB, we see the result of the load. */ 1539 dest = get_temp(ctx); 1540 } 1541 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop); 1542 save_gpr(ctx, rt, dest); 1543 1544 return nullify_end(ctx); 1545 } 1546 1547 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb, 1548 unsigned rx, int scale, target_sreg disp, 1549 unsigned sp, int modify) 1550 { 1551 TCGv_i32 tmp; 1552 1553 nullify_over(ctx); 1554 1555 tmp = tcg_temp_new_i32(); 1556 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); 1557 save_frw_i32(rt, tmp); 1558 1559 if (rt == 0) { 1560 gen_helper_loaded_fr0(cpu_env); 1561 } 1562 1563 return nullify_end(ctx); 1564 } 1565 1566 static bool trans_fldw(DisasContext *ctx, arg_ldst *a) 1567 { 1568 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0, 1569 a->disp, a->sp, a->m); 1570 } 1571 1572 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb, 1573 unsigned rx, int scale, target_sreg disp, 1574 unsigned sp, int modify) 1575 { 1576 TCGv_i64 tmp; 1577 1578 nullify_over(ctx); 1579 1580 tmp = tcg_temp_new_i64(); 1581 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); 1582 save_frd(rt, tmp); 1583 1584 if (rt == 0) { 1585 gen_helper_loaded_fr0(cpu_env); 1586 } 1587 1588 return nullify_end(ctx); 1589 } 1590 1591 static bool trans_fldd(DisasContext *ctx, arg_ldst *a) 1592 { 1593 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0, 1594 a->disp, a->sp, a->m); 1595 } 1596 1597 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb, 1598 target_sreg disp, unsigned sp, 1599 int modify, MemOp mop) 1600 { 1601 nullify_over(ctx); 1602 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop); 1603 return nullify_end(ctx); 1604 } 1605 1606 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb, 1607 unsigned rx, int scale, target_sreg disp, 1608 unsigned sp, int modify) 1609 { 1610 TCGv_i32 tmp; 1611 1612 nullify_over(ctx); 1613 1614 tmp = load_frw_i32(rt); 1615 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); 1616 1617 return nullify_end(ctx); 1618 } 1619 1620 static bool trans_fstw(DisasContext *ctx, arg_ldst *a) 1621 { 1622 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0, 1623 a->disp, a->sp, a->m); 1624 } 1625 1626 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb, 1627 unsigned rx, int scale, target_sreg disp, 1628 unsigned sp, int modify) 1629 { 1630 TCGv_i64 tmp; 1631 1632 nullify_over(ctx); 1633 1634 tmp = load_frd(rt); 1635 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); 1636 1637 return nullify_end(ctx); 1638 } 1639 1640 static bool trans_fstd(DisasContext *ctx, arg_ldst *a) 1641 { 1642 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0, 1643 a->disp, a->sp, a->m); 1644 } 1645 1646 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra, 1647 void (*func)(TCGv_i32, TCGv_env, TCGv_i32)) 1648 { 1649 TCGv_i32 tmp; 1650 1651 nullify_over(ctx); 1652 tmp = load_frw0_i32(ra); 1653 1654 func(tmp, cpu_env, tmp); 1655 1656 save_frw_i32(rt, tmp); 1657 return nullify_end(ctx); 1658 } 1659 1660 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra, 1661 void (*func)(TCGv_i32, TCGv_env, TCGv_i64)) 1662 { 1663 TCGv_i32 dst; 1664 TCGv_i64 src; 1665 1666 nullify_over(ctx); 1667 src = load_frd(ra); 1668 dst = tcg_temp_new_i32(); 1669 1670 func(dst, cpu_env, src); 1671 1672 save_frw_i32(rt, dst); 1673 return nullify_end(ctx); 1674 } 1675 1676 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra, 1677 void (*func)(TCGv_i64, TCGv_env, TCGv_i64)) 1678 { 1679 TCGv_i64 tmp; 1680 1681 nullify_over(ctx); 1682 tmp = load_frd0(ra); 1683 1684 func(tmp, cpu_env, tmp); 1685 1686 save_frd(rt, tmp); 1687 return nullify_end(ctx); 1688 } 1689 1690 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra, 1691 void (*func)(TCGv_i64, TCGv_env, TCGv_i32)) 1692 { 1693 TCGv_i32 src; 1694 TCGv_i64 dst; 1695 1696 nullify_over(ctx); 1697 src = load_frw0_i32(ra); 1698 dst = tcg_temp_new_i64(); 1699 1700 func(dst, cpu_env, src); 1701 1702 save_frd(rt, dst); 1703 return nullify_end(ctx); 1704 } 1705 1706 static bool do_fop_weww(DisasContext *ctx, unsigned rt, 1707 unsigned ra, unsigned rb, 1708 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32)) 1709 { 1710 TCGv_i32 a, b; 1711 1712 nullify_over(ctx); 1713 a = load_frw0_i32(ra); 1714 b = load_frw0_i32(rb); 1715 1716 func(a, cpu_env, a, b); 1717 1718 save_frw_i32(rt, a); 1719 return nullify_end(ctx); 1720 } 1721 1722 static bool do_fop_dedd(DisasContext *ctx, unsigned rt, 1723 unsigned ra, unsigned rb, 1724 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64)) 1725 { 1726 TCGv_i64 a, b; 1727 1728 nullify_over(ctx); 1729 a = load_frd0(ra); 1730 b = load_frd0(rb); 1731 1732 func(a, cpu_env, a, b); 1733 1734 save_frd(rt, a); 1735 return nullify_end(ctx); 1736 } 1737 1738 /* Emit an unconditional branch to a direct target, which may or may not 1739 have already had nullification handled. */ 1740 static bool do_dbranch(DisasContext *ctx, target_ureg dest, 1741 unsigned link, bool is_n) 1742 { 1743 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) { 1744 if (link != 0) { 1745 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1746 } 1747 ctx->iaoq_n = dest; 1748 if (is_n) { 1749 ctx->null_cond.c = TCG_COND_ALWAYS; 1750 } 1751 } else { 1752 nullify_over(ctx); 1753 1754 if (link != 0) { 1755 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1756 } 1757 1758 if (is_n && use_nullify_skip(ctx)) { 1759 nullify_set(ctx, 0); 1760 gen_goto_tb(ctx, 0, dest, dest + 4); 1761 } else { 1762 nullify_set(ctx, is_n); 1763 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest); 1764 } 1765 1766 nullify_end(ctx); 1767 1768 nullify_set(ctx, 0); 1769 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n); 1770 ctx->base.is_jmp = DISAS_NORETURN; 1771 } 1772 return true; 1773 } 1774 1775 /* Emit a conditional branch to a direct target. If the branch itself 1776 is nullified, we should have already used nullify_over. */ 1777 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n, 1778 DisasCond *cond) 1779 { 1780 target_ureg dest = iaoq_dest(ctx, disp); 1781 TCGLabel *taken = NULL; 1782 TCGCond c = cond->c; 1783 bool n; 1784 1785 assert(ctx->null_cond.c == TCG_COND_NEVER); 1786 1787 /* Handle TRUE and NEVER as direct branches. */ 1788 if (c == TCG_COND_ALWAYS) { 1789 return do_dbranch(ctx, dest, 0, is_n && disp >= 0); 1790 } 1791 if (c == TCG_COND_NEVER) { 1792 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0); 1793 } 1794 1795 taken = gen_new_label(); 1796 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken); 1797 cond_free(cond); 1798 1799 /* Not taken: Condition not satisfied; nullify on backward branches. */ 1800 n = is_n && disp < 0; 1801 if (n && use_nullify_skip(ctx)) { 1802 nullify_set(ctx, 0); 1803 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4); 1804 } else { 1805 if (!n && ctx->null_lab) { 1806 gen_set_label(ctx->null_lab); 1807 ctx->null_lab = NULL; 1808 } 1809 nullify_set(ctx, n); 1810 if (ctx->iaoq_n == -1) { 1811 /* The temporary iaoq_n_var died at the branch above. 1812 Regenerate it here instead of saving it. */ 1813 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4); 1814 } 1815 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n); 1816 } 1817 1818 gen_set_label(taken); 1819 1820 /* Taken: Condition satisfied; nullify on forward branches. */ 1821 n = is_n && disp >= 0; 1822 if (n && use_nullify_skip(ctx)) { 1823 nullify_set(ctx, 0); 1824 gen_goto_tb(ctx, 1, dest, dest + 4); 1825 } else { 1826 nullify_set(ctx, n); 1827 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest); 1828 } 1829 1830 /* Not taken: the branch itself was nullified. */ 1831 if (ctx->null_lab) { 1832 gen_set_label(ctx->null_lab); 1833 ctx->null_lab = NULL; 1834 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 1835 } else { 1836 ctx->base.is_jmp = DISAS_NORETURN; 1837 } 1838 return true; 1839 } 1840 1841 /* Emit an unconditional branch to an indirect target. This handles 1842 nullification of the branch itself. */ 1843 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest, 1844 unsigned link, bool is_n) 1845 { 1846 TCGv_reg a0, a1, next, tmp; 1847 TCGCond c; 1848 1849 assert(ctx->null_lab == NULL); 1850 1851 if (ctx->null_cond.c == TCG_COND_NEVER) { 1852 if (link != 0) { 1853 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1854 } 1855 next = get_temp(ctx); 1856 tcg_gen_mov_reg(next, dest); 1857 if (is_n) { 1858 if (use_nullify_skip(ctx)) { 1859 tcg_gen_mov_reg(cpu_iaoq_f, next); 1860 tcg_gen_addi_reg(cpu_iaoq_b, next, 4); 1861 nullify_set(ctx, 0); 1862 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED; 1863 return true; 1864 } 1865 ctx->null_cond.c = TCG_COND_ALWAYS; 1866 } 1867 ctx->iaoq_n = -1; 1868 ctx->iaoq_n_var = next; 1869 } else if (is_n && use_nullify_skip(ctx)) { 1870 /* The (conditional) branch, B, nullifies the next insn, N, 1871 and we're allowed to skip execution N (no single-step or 1872 tracepoint in effect). Since the goto_ptr that we must use 1873 for the indirect branch consumes no special resources, we 1874 can (conditionally) skip B and continue execution. */ 1875 /* The use_nullify_skip test implies we have a known control path. */ 1876 tcg_debug_assert(ctx->iaoq_b != -1); 1877 tcg_debug_assert(ctx->iaoq_n != -1); 1878 1879 /* We do have to handle the non-local temporary, DEST, before 1880 branching. Since IOAQ_F is not really live at this point, we 1881 can simply store DEST optimistically. Similarly with IAOQ_B. */ 1882 tcg_gen_mov_reg(cpu_iaoq_f, dest); 1883 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4); 1884 1885 nullify_over(ctx); 1886 if (link != 0) { 1887 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n); 1888 } 1889 tcg_gen_lookup_and_goto_ptr(); 1890 return nullify_end(ctx); 1891 } else { 1892 c = ctx->null_cond.c; 1893 a0 = ctx->null_cond.a0; 1894 a1 = ctx->null_cond.a1; 1895 1896 tmp = tcg_temp_new(); 1897 next = get_temp(ctx); 1898 1899 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var); 1900 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest); 1901 ctx->iaoq_n = -1; 1902 ctx->iaoq_n_var = next; 1903 1904 if (link != 0) { 1905 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp); 1906 } 1907 1908 if (is_n) { 1909 /* The branch nullifies the next insn, which means the state of N 1910 after the branch is the inverse of the state of N that applied 1911 to the branch. */ 1912 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1); 1913 cond_free(&ctx->null_cond); 1914 ctx->null_cond = cond_make_n(); 1915 ctx->psw_n_nonzero = true; 1916 } else { 1917 cond_free(&ctx->null_cond); 1918 } 1919 } 1920 return true; 1921 } 1922 1923 /* Implement 1924 * if (IAOQ_Front{30..31} < GR[b]{30..31}) 1925 * IAOQ_Next{30..31} ← GR[b]{30..31}; 1926 * else 1927 * IAOQ_Next{30..31} ← IAOQ_Front{30..31}; 1928 * which keeps the privilege level from being increased. 1929 */ 1930 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset) 1931 { 1932 TCGv_reg dest; 1933 switch (ctx->privilege) { 1934 case 0: 1935 /* Privilege 0 is maximum and is allowed to decrease. */ 1936 return offset; 1937 case 3: 1938 /* Privilege 3 is minimum and is never allowed to increase. */ 1939 dest = get_temp(ctx); 1940 tcg_gen_ori_reg(dest, offset, 3); 1941 break; 1942 default: 1943 dest = get_temp(ctx); 1944 tcg_gen_andi_reg(dest, offset, -4); 1945 tcg_gen_ori_reg(dest, dest, ctx->privilege); 1946 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset); 1947 break; 1948 } 1949 return dest; 1950 } 1951 1952 #ifdef CONFIG_USER_ONLY 1953 /* On Linux, page zero is normally marked execute only + gateway. 1954 Therefore normal read or write is supposed to fail, but specific 1955 offsets have kernel code mapped to raise permissions to implement 1956 system calls. Handling this via an explicit check here, rather 1957 in than the "be disp(sr2,r0)" instruction that probably sent us 1958 here, is the easiest way to handle the branch delay slot on the 1959 aforementioned BE. */ 1960 static void do_page_zero(DisasContext *ctx) 1961 { 1962 /* If by some means we get here with PSW[N]=1, that implies that 1963 the B,GATE instruction would be skipped, and we'd fault on the 1964 next insn within the privilaged page. */ 1965 switch (ctx->null_cond.c) { 1966 case TCG_COND_NEVER: 1967 break; 1968 case TCG_COND_ALWAYS: 1969 tcg_gen_movi_reg(cpu_psw_n, 0); 1970 goto do_sigill; 1971 default: 1972 /* Since this is always the first (and only) insn within the 1973 TB, we should know the state of PSW[N] from TB->FLAGS. */ 1974 g_assert_not_reached(); 1975 } 1976 1977 /* Check that we didn't arrive here via some means that allowed 1978 non-sequential instruction execution. Normally the PSW[B] bit 1979 detects this by disallowing the B,GATE instruction to execute 1980 under such conditions. */ 1981 if (ctx->iaoq_b != ctx->iaoq_f + 4) { 1982 goto do_sigill; 1983 } 1984 1985 switch (ctx->iaoq_f & -4) { 1986 case 0x00: /* Null pointer call */ 1987 gen_excp_1(EXCP_IMP); 1988 ctx->base.is_jmp = DISAS_NORETURN; 1989 break; 1990 1991 case 0xb0: /* LWS */ 1992 gen_excp_1(EXCP_SYSCALL_LWS); 1993 ctx->base.is_jmp = DISAS_NORETURN; 1994 break; 1995 1996 case 0xe0: /* SET_THREAD_POINTER */ 1997 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27])); 1998 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3); 1999 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4); 2000 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED; 2001 break; 2002 2003 case 0x100: /* SYSCALL */ 2004 gen_excp_1(EXCP_SYSCALL); 2005 ctx->base.is_jmp = DISAS_NORETURN; 2006 break; 2007 2008 default: 2009 do_sigill: 2010 gen_excp_1(EXCP_ILL); 2011 ctx->base.is_jmp = DISAS_NORETURN; 2012 break; 2013 } 2014 } 2015 #endif 2016 2017 static bool trans_nop(DisasContext *ctx, arg_nop *a) 2018 { 2019 cond_free(&ctx->null_cond); 2020 return true; 2021 } 2022 2023 static bool trans_break(DisasContext *ctx, arg_break *a) 2024 { 2025 return gen_excp_iir(ctx, EXCP_BREAK); 2026 } 2027 2028 static bool trans_sync(DisasContext *ctx, arg_sync *a) 2029 { 2030 /* No point in nullifying the memory barrier. */ 2031 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); 2032 2033 cond_free(&ctx->null_cond); 2034 return true; 2035 } 2036 2037 static bool trans_mfia(DisasContext *ctx, arg_mfia *a) 2038 { 2039 unsigned rt = a->t; 2040 TCGv_reg tmp = dest_gpr(ctx, rt); 2041 tcg_gen_movi_reg(tmp, ctx->iaoq_f); 2042 save_gpr(ctx, rt, tmp); 2043 2044 cond_free(&ctx->null_cond); 2045 return true; 2046 } 2047 2048 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a) 2049 { 2050 unsigned rt = a->t; 2051 unsigned rs = a->sp; 2052 TCGv_i64 t0 = tcg_temp_new_i64(); 2053 TCGv_reg t1 = tcg_temp_new(); 2054 2055 load_spr(ctx, t0, rs); 2056 tcg_gen_shri_i64(t0, t0, 32); 2057 tcg_gen_trunc_i64_reg(t1, t0); 2058 2059 save_gpr(ctx, rt, t1); 2060 2061 cond_free(&ctx->null_cond); 2062 return true; 2063 } 2064 2065 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) 2066 { 2067 unsigned rt = a->t; 2068 unsigned ctl = a->r; 2069 TCGv_reg tmp; 2070 2071 switch (ctl) { 2072 case CR_SAR: 2073 #ifdef TARGET_HPPA64 2074 if (a->e == 0) { 2075 /* MFSAR without ,W masks low 5 bits. */ 2076 tmp = dest_gpr(ctx, rt); 2077 tcg_gen_andi_reg(tmp, cpu_sar, 31); 2078 save_gpr(ctx, rt, tmp); 2079 goto done; 2080 } 2081 #endif 2082 save_gpr(ctx, rt, cpu_sar); 2083 goto done; 2084 case CR_IT: /* Interval Timer */ 2085 /* FIXME: Respect PSW_S bit. */ 2086 nullify_over(ctx); 2087 tmp = dest_gpr(ctx, rt); 2088 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 2089 gen_io_start(); 2090 gen_helper_read_interval_timer(tmp); 2091 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2092 } else { 2093 gen_helper_read_interval_timer(tmp); 2094 } 2095 save_gpr(ctx, rt, tmp); 2096 return nullify_end(ctx); 2097 case 26: 2098 case 27: 2099 break; 2100 default: 2101 /* All other control registers are privileged. */ 2102 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2103 break; 2104 } 2105 2106 tmp = get_temp(ctx); 2107 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2108 save_gpr(ctx, rt, tmp); 2109 2110 done: 2111 cond_free(&ctx->null_cond); 2112 return true; 2113 } 2114 2115 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) 2116 { 2117 unsigned rr = a->r; 2118 unsigned rs = a->sp; 2119 TCGv_i64 t64; 2120 2121 if (rs >= 5) { 2122 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2123 } 2124 nullify_over(ctx); 2125 2126 t64 = tcg_temp_new_i64(); 2127 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr)); 2128 tcg_gen_shli_i64(t64, t64, 32); 2129 2130 if (rs >= 4) { 2131 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs])); 2132 ctx->tb_flags &= ~TB_FLAG_SR_SAME; 2133 } else { 2134 tcg_gen_mov_i64(cpu_sr[rs], t64); 2135 } 2136 2137 return nullify_end(ctx); 2138 } 2139 2140 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) 2141 { 2142 unsigned ctl = a->t; 2143 TCGv_reg reg; 2144 TCGv_reg tmp; 2145 2146 if (ctl == CR_SAR) { 2147 reg = load_gpr(ctx, a->r); 2148 tmp = tcg_temp_new(); 2149 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1); 2150 save_or_nullify(ctx, cpu_sar, tmp); 2151 2152 cond_free(&ctx->null_cond); 2153 return true; 2154 } 2155 2156 /* All other control registers are privileged or read-only. */ 2157 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2158 2159 #ifndef CONFIG_USER_ONLY 2160 nullify_over(ctx); 2161 reg = load_gpr(ctx, a->r); 2162 2163 switch (ctl) { 2164 case CR_IT: 2165 gen_helper_write_interval_timer(cpu_env, reg); 2166 break; 2167 case CR_EIRR: 2168 gen_helper_write_eirr(cpu_env, reg); 2169 break; 2170 case CR_EIEM: 2171 gen_helper_write_eiem(cpu_env, reg); 2172 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2173 break; 2174 2175 case CR_IIASQ: 2176 case CR_IIAOQ: 2177 /* FIXME: Respect PSW_Q bit */ 2178 /* The write advances the queue and stores to the back element. */ 2179 tmp = get_temp(ctx); 2180 tcg_gen_ld_reg(tmp, cpu_env, 2181 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2182 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2183 tcg_gen_st_reg(reg, cpu_env, 2184 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2185 break; 2186 2187 case CR_PID1: 2188 case CR_PID2: 2189 case CR_PID3: 2190 case CR_PID4: 2191 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2192 #ifndef CONFIG_USER_ONLY 2193 gen_helper_change_prot_id(cpu_env); 2194 #endif 2195 break; 2196 2197 default: 2198 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2199 break; 2200 } 2201 return nullify_end(ctx); 2202 #endif 2203 } 2204 2205 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) 2206 { 2207 TCGv_reg tmp = tcg_temp_new(); 2208 2209 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r)); 2210 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); 2211 save_or_nullify(ctx, cpu_sar, tmp); 2212 2213 cond_free(&ctx->null_cond); 2214 return true; 2215 } 2216 2217 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) 2218 { 2219 TCGv_reg dest = dest_gpr(ctx, a->t); 2220 2221 #ifdef CONFIG_USER_ONLY 2222 /* We don't implement space registers in user mode. */ 2223 tcg_gen_movi_reg(dest, 0); 2224 #else 2225 TCGv_i64 t0 = tcg_temp_new_i64(); 2226 2227 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b))); 2228 tcg_gen_shri_i64(t0, t0, 32); 2229 tcg_gen_trunc_i64_reg(dest, t0); 2230 #endif 2231 save_gpr(ctx, a->t, dest); 2232 2233 cond_free(&ctx->null_cond); 2234 return true; 2235 } 2236 2237 static bool trans_rsm(DisasContext *ctx, arg_rsm *a) 2238 { 2239 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2240 #ifndef CONFIG_USER_ONLY 2241 TCGv_reg tmp; 2242 2243 nullify_over(ctx); 2244 2245 tmp = get_temp(ctx); 2246 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw)); 2247 tcg_gen_andi_reg(tmp, tmp, ~a->i); 2248 gen_helper_swap_system_mask(tmp, cpu_env, tmp); 2249 save_gpr(ctx, a->t, tmp); 2250 2251 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */ 2252 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2253 return nullify_end(ctx); 2254 #endif 2255 } 2256 2257 static bool trans_ssm(DisasContext *ctx, arg_ssm *a) 2258 { 2259 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2260 #ifndef CONFIG_USER_ONLY 2261 TCGv_reg tmp; 2262 2263 nullify_over(ctx); 2264 2265 tmp = get_temp(ctx); 2266 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw)); 2267 tcg_gen_ori_reg(tmp, tmp, a->i); 2268 gen_helper_swap_system_mask(tmp, cpu_env, tmp); 2269 save_gpr(ctx, a->t, tmp); 2270 2271 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */ 2272 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2273 return nullify_end(ctx); 2274 #endif 2275 } 2276 2277 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a) 2278 { 2279 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2280 #ifndef CONFIG_USER_ONLY 2281 TCGv_reg tmp, reg; 2282 nullify_over(ctx); 2283 2284 reg = load_gpr(ctx, a->r); 2285 tmp = get_temp(ctx); 2286 gen_helper_swap_system_mask(tmp, cpu_env, reg); 2287 2288 /* Exit the TB to recognize new interrupts. */ 2289 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2290 return nullify_end(ctx); 2291 #endif 2292 } 2293 2294 static bool do_rfi(DisasContext *ctx, bool rfi_r) 2295 { 2296 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2297 #ifndef CONFIG_USER_ONLY 2298 nullify_over(ctx); 2299 2300 if (rfi_r) { 2301 gen_helper_rfi_r(cpu_env); 2302 } else { 2303 gen_helper_rfi(cpu_env); 2304 } 2305 /* Exit the TB to recognize new interrupts. */ 2306 tcg_gen_exit_tb(NULL, 0); 2307 ctx->base.is_jmp = DISAS_NORETURN; 2308 2309 return nullify_end(ctx); 2310 #endif 2311 } 2312 2313 static bool trans_rfi(DisasContext *ctx, arg_rfi *a) 2314 { 2315 return do_rfi(ctx, false); 2316 } 2317 2318 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a) 2319 { 2320 return do_rfi(ctx, true); 2321 } 2322 2323 static bool trans_halt(DisasContext *ctx, arg_halt *a) 2324 { 2325 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2326 #ifndef CONFIG_USER_ONLY 2327 nullify_over(ctx); 2328 gen_helper_halt(cpu_env); 2329 ctx->base.is_jmp = DISAS_NORETURN; 2330 return nullify_end(ctx); 2331 #endif 2332 } 2333 2334 static bool trans_reset(DisasContext *ctx, arg_reset *a) 2335 { 2336 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2337 #ifndef CONFIG_USER_ONLY 2338 nullify_over(ctx); 2339 gen_helper_reset(cpu_env); 2340 ctx->base.is_jmp = DISAS_NORETURN; 2341 return nullify_end(ctx); 2342 #endif 2343 } 2344 2345 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a) 2346 { 2347 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2348 #ifndef CONFIG_USER_ONLY 2349 nullify_over(ctx); 2350 gen_helper_getshadowregs(cpu_env); 2351 return nullify_end(ctx); 2352 #endif 2353 } 2354 2355 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) 2356 { 2357 if (a->m) { 2358 TCGv_reg dest = dest_gpr(ctx, a->b); 2359 TCGv_reg src1 = load_gpr(ctx, a->b); 2360 TCGv_reg src2 = load_gpr(ctx, a->x); 2361 2362 /* The only thing we need to do is the base register modification. */ 2363 tcg_gen_add_reg(dest, src1, src2); 2364 save_gpr(ctx, a->b, dest); 2365 } 2366 cond_free(&ctx->null_cond); 2367 return true; 2368 } 2369 2370 static bool trans_probe(DisasContext *ctx, arg_probe *a) 2371 { 2372 TCGv_reg dest, ofs; 2373 TCGv_i32 level, want; 2374 TCGv_tl addr; 2375 2376 nullify_over(ctx); 2377 2378 dest = dest_gpr(ctx, a->t); 2379 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2380 2381 if (a->imm) { 2382 level = tcg_constant_i32(a->ri); 2383 } else { 2384 level = tcg_temp_new_i32(); 2385 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri)); 2386 tcg_gen_andi_i32(level, level, 3); 2387 } 2388 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ); 2389 2390 gen_helper_probe(dest, cpu_env, addr, level, want); 2391 2392 save_gpr(ctx, a->t, dest); 2393 return nullify_end(ctx); 2394 } 2395 2396 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a) 2397 { 2398 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2399 #ifndef CONFIG_USER_ONLY 2400 TCGv_tl addr; 2401 TCGv_reg ofs, reg; 2402 2403 nullify_over(ctx); 2404 2405 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2406 reg = load_gpr(ctx, a->r); 2407 if (a->addr) { 2408 gen_helper_itlba(cpu_env, addr, reg); 2409 } else { 2410 gen_helper_itlbp(cpu_env, addr, reg); 2411 } 2412 2413 /* Exit TB for TLB change if mmu is enabled. */ 2414 if (ctx->tb_flags & PSW_C) { 2415 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2416 } 2417 return nullify_end(ctx); 2418 #endif 2419 } 2420 2421 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a) 2422 { 2423 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2424 #ifndef CONFIG_USER_ONLY 2425 TCGv_tl addr; 2426 TCGv_reg ofs; 2427 2428 nullify_over(ctx); 2429 2430 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2431 if (a->m) { 2432 save_gpr(ctx, a->b, ofs); 2433 } 2434 if (a->local) { 2435 gen_helper_ptlbe(cpu_env); 2436 } else { 2437 gen_helper_ptlb(cpu_env, addr); 2438 } 2439 2440 /* Exit TB for TLB change if mmu is enabled. */ 2441 if (ctx->tb_flags & PSW_C) { 2442 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2443 } 2444 return nullify_end(ctx); 2445 #endif 2446 } 2447 2448 /* 2449 * Implement the pcxl and pcxl2 Fast TLB Insert instructions. 2450 * See 2451 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf 2452 * page 13-9 (195/206) 2453 */ 2454 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) 2455 { 2456 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2457 #ifndef CONFIG_USER_ONLY 2458 TCGv_tl addr, atl, stl; 2459 TCGv_reg reg; 2460 2461 nullify_over(ctx); 2462 2463 /* 2464 * FIXME: 2465 * if (not (pcxl or pcxl2)) 2466 * return gen_illegal(ctx); 2467 * 2468 * Note for future: these are 32-bit systems; no hppa64. 2469 */ 2470 2471 atl = tcg_temp_new_tl(); 2472 stl = tcg_temp_new_tl(); 2473 addr = tcg_temp_new_tl(); 2474 2475 tcg_gen_ld32u_i64(stl, cpu_env, 2476 a->data ? offsetof(CPUHPPAState, cr[CR_ISR]) 2477 : offsetof(CPUHPPAState, cr[CR_IIASQ])); 2478 tcg_gen_ld32u_i64(atl, cpu_env, 2479 a->data ? offsetof(CPUHPPAState, cr[CR_IOR]) 2480 : offsetof(CPUHPPAState, cr[CR_IIAOQ])); 2481 tcg_gen_shli_i64(stl, stl, 32); 2482 tcg_gen_or_tl(addr, atl, stl); 2483 2484 reg = load_gpr(ctx, a->r); 2485 if (a->addr) { 2486 gen_helper_itlba(cpu_env, addr, reg); 2487 } else { 2488 gen_helper_itlbp(cpu_env, addr, reg); 2489 } 2490 2491 /* Exit TB for TLB change if mmu is enabled. */ 2492 if (ctx->tb_flags & PSW_C) { 2493 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2494 } 2495 return nullify_end(ctx); 2496 #endif 2497 } 2498 2499 static bool trans_lpa(DisasContext *ctx, arg_ldst *a) 2500 { 2501 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2502 #ifndef CONFIG_USER_ONLY 2503 TCGv_tl vaddr; 2504 TCGv_reg ofs, paddr; 2505 2506 nullify_over(ctx); 2507 2508 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2509 2510 paddr = tcg_temp_new(); 2511 gen_helper_lpa(paddr, cpu_env, vaddr); 2512 2513 /* Note that physical address result overrides base modification. */ 2514 if (a->m) { 2515 save_gpr(ctx, a->b, ofs); 2516 } 2517 save_gpr(ctx, a->t, paddr); 2518 2519 return nullify_end(ctx); 2520 #endif 2521 } 2522 2523 static bool trans_lci(DisasContext *ctx, arg_lci *a) 2524 { 2525 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2526 2527 /* The Coherence Index is an implementation-defined function of the 2528 physical address. Two addresses with the same CI have a coherent 2529 view of the cache. Our implementation is to return 0 for all, 2530 since the entire address space is coherent. */ 2531 save_gpr(ctx, a->t, tcg_constant_reg(0)); 2532 2533 cond_free(&ctx->null_cond); 2534 return true; 2535 } 2536 2537 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a) 2538 { 2539 return do_add_reg(ctx, a, false, false, false, false); 2540 } 2541 2542 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a) 2543 { 2544 return do_add_reg(ctx, a, true, false, false, false); 2545 } 2546 2547 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a) 2548 { 2549 return do_add_reg(ctx, a, false, true, false, false); 2550 } 2551 2552 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a) 2553 { 2554 return do_add_reg(ctx, a, false, false, false, true); 2555 } 2556 2557 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a) 2558 { 2559 return do_add_reg(ctx, a, false, true, false, true); 2560 } 2561 2562 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a) 2563 { 2564 return do_sub_reg(ctx, a, false, false, false); 2565 } 2566 2567 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a) 2568 { 2569 return do_sub_reg(ctx, a, true, false, false); 2570 } 2571 2572 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a) 2573 { 2574 return do_sub_reg(ctx, a, false, false, true); 2575 } 2576 2577 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a) 2578 { 2579 return do_sub_reg(ctx, a, true, false, true); 2580 } 2581 2582 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a) 2583 { 2584 return do_sub_reg(ctx, a, false, true, false); 2585 } 2586 2587 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a) 2588 { 2589 return do_sub_reg(ctx, a, true, true, false); 2590 } 2591 2592 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a) 2593 { 2594 return do_log_reg(ctx, a, tcg_gen_andc_reg); 2595 } 2596 2597 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a) 2598 { 2599 return do_log_reg(ctx, a, tcg_gen_and_reg); 2600 } 2601 2602 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a) 2603 { 2604 if (a->cf == 0) { 2605 unsigned r2 = a->r2; 2606 unsigned r1 = a->r1; 2607 unsigned rt = a->t; 2608 2609 if (rt == 0) { /* NOP */ 2610 cond_free(&ctx->null_cond); 2611 return true; 2612 } 2613 if (r2 == 0) { /* COPY */ 2614 if (r1 == 0) { 2615 TCGv_reg dest = dest_gpr(ctx, rt); 2616 tcg_gen_movi_reg(dest, 0); 2617 save_gpr(ctx, rt, dest); 2618 } else { 2619 save_gpr(ctx, rt, cpu_gr[r1]); 2620 } 2621 cond_free(&ctx->null_cond); 2622 return true; 2623 } 2624 #ifndef CONFIG_USER_ONLY 2625 /* These are QEMU extensions and are nops in the real architecture: 2626 * 2627 * or %r10,%r10,%r10 -- idle loop; wait for interrupt 2628 * or %r31,%r31,%r31 -- death loop; offline cpu 2629 * currently implemented as idle. 2630 */ 2631 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */ 2632 /* No need to check for supervisor, as userland can only pause 2633 until the next timer interrupt. */ 2634 nullify_over(ctx); 2635 2636 /* Advance the instruction queue. */ 2637 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 2638 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); 2639 nullify_set(ctx, 0); 2640 2641 /* Tell the qemu main loop to halt until this cpu has work. */ 2642 tcg_gen_st_i32(tcg_constant_i32(1), cpu_env, 2643 offsetof(CPUState, halted) - offsetof(HPPACPU, env)); 2644 gen_excp_1(EXCP_HALTED); 2645 ctx->base.is_jmp = DISAS_NORETURN; 2646 2647 return nullify_end(ctx); 2648 } 2649 #endif 2650 } 2651 return do_log_reg(ctx, a, tcg_gen_or_reg); 2652 } 2653 2654 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a) 2655 { 2656 return do_log_reg(ctx, a, tcg_gen_xor_reg); 2657 } 2658 2659 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a) 2660 { 2661 TCGv_reg tcg_r1, tcg_r2; 2662 2663 if (a->cf) { 2664 nullify_over(ctx); 2665 } 2666 tcg_r1 = load_gpr(ctx, a->r1); 2667 tcg_r2 = load_gpr(ctx, a->r2); 2668 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf); 2669 return nullify_end(ctx); 2670 } 2671 2672 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a) 2673 { 2674 TCGv_reg tcg_r1, tcg_r2; 2675 2676 if (a->cf) { 2677 nullify_over(ctx); 2678 } 2679 tcg_r1 = load_gpr(ctx, a->r1); 2680 tcg_r2 = load_gpr(ctx, a->r2); 2681 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg); 2682 return nullify_end(ctx); 2683 } 2684 2685 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc) 2686 { 2687 TCGv_reg tcg_r1, tcg_r2, tmp; 2688 2689 if (a->cf) { 2690 nullify_over(ctx); 2691 } 2692 tcg_r1 = load_gpr(ctx, a->r1); 2693 tcg_r2 = load_gpr(ctx, a->r2); 2694 tmp = get_temp(ctx); 2695 tcg_gen_not_reg(tmp, tcg_r2); 2696 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg); 2697 return nullify_end(ctx); 2698 } 2699 2700 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a) 2701 { 2702 return do_uaddcm(ctx, a, false); 2703 } 2704 2705 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a) 2706 { 2707 return do_uaddcm(ctx, a, true); 2708 } 2709 2710 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i) 2711 { 2712 TCGv_reg tmp; 2713 2714 nullify_over(ctx); 2715 2716 tmp = get_temp(ctx); 2717 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3); 2718 if (!is_i) { 2719 tcg_gen_not_reg(tmp, tmp); 2720 } 2721 tcg_gen_andi_reg(tmp, tmp, 0x11111111); 2722 tcg_gen_muli_reg(tmp, tmp, 6); 2723 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false, 2724 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg); 2725 return nullify_end(ctx); 2726 } 2727 2728 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a) 2729 { 2730 return do_dcor(ctx, a, false); 2731 } 2732 2733 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a) 2734 { 2735 return do_dcor(ctx, a, true); 2736 } 2737 2738 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) 2739 { 2740 TCGv_reg dest, add1, add2, addc, zero, in1, in2; 2741 2742 nullify_over(ctx); 2743 2744 in1 = load_gpr(ctx, a->r1); 2745 in2 = load_gpr(ctx, a->r2); 2746 2747 add1 = tcg_temp_new(); 2748 add2 = tcg_temp_new(); 2749 addc = tcg_temp_new(); 2750 dest = tcg_temp_new(); 2751 zero = tcg_constant_reg(0); 2752 2753 /* Form R1 << 1 | PSW[CB]{8}. */ 2754 tcg_gen_add_reg(add1, in1, in1); 2755 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb); 2756 2757 /* Add or subtract R2, depending on PSW[V]. Proper computation of 2758 carry{8} requires that we subtract via + ~R2 + 1, as described in 2759 the manual. By extracting and masking V, we can produce the 2760 proper inputs to the addition without movcond. */ 2761 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1); 2762 tcg_gen_xor_reg(add2, in2, addc); 2763 tcg_gen_andi_reg(addc, addc, 1); 2764 /* ??? This is only correct for 32-bit. */ 2765 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero); 2766 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero); 2767 2768 /* Write back the result register. */ 2769 save_gpr(ctx, a->t, dest); 2770 2771 /* Write back PSW[CB]. */ 2772 tcg_gen_xor_reg(cpu_psw_cb, add1, add2); 2773 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest); 2774 2775 /* Write back PSW[V] for the division step. */ 2776 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb); 2777 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2); 2778 2779 /* Install the new nullification. */ 2780 if (a->cf) { 2781 TCGv_reg sv = NULL; 2782 if (cond_need_sv(a->cf >> 1)) { 2783 /* ??? The lshift is supposed to contribute to overflow. */ 2784 sv = do_add_sv(ctx, dest, add1, add2); 2785 } 2786 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv); 2787 } 2788 2789 return nullify_end(ctx); 2790 } 2791 2792 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a) 2793 { 2794 return do_add_imm(ctx, a, false, false); 2795 } 2796 2797 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a) 2798 { 2799 return do_add_imm(ctx, a, true, false); 2800 } 2801 2802 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a) 2803 { 2804 return do_add_imm(ctx, a, false, true); 2805 } 2806 2807 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a) 2808 { 2809 return do_add_imm(ctx, a, true, true); 2810 } 2811 2812 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a) 2813 { 2814 return do_sub_imm(ctx, a, false); 2815 } 2816 2817 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a) 2818 { 2819 return do_sub_imm(ctx, a, true); 2820 } 2821 2822 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a) 2823 { 2824 TCGv_reg tcg_im, tcg_r2; 2825 2826 if (a->cf) { 2827 nullify_over(ctx); 2828 } 2829 2830 tcg_im = load_const(ctx, a->i); 2831 tcg_r2 = load_gpr(ctx, a->r); 2832 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf); 2833 2834 return nullify_end(ctx); 2835 } 2836 2837 static bool trans_ld(DisasContext *ctx, arg_ldst *a) 2838 { 2839 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) { 2840 return gen_illegal(ctx); 2841 } else { 2842 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0, 2843 a->disp, a->sp, a->m, a->size | MO_TE); 2844 } 2845 } 2846 2847 static bool trans_st(DisasContext *ctx, arg_ldst *a) 2848 { 2849 assert(a->x == 0 && a->scale == 0); 2850 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) { 2851 return gen_illegal(ctx); 2852 } else { 2853 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE); 2854 } 2855 } 2856 2857 static bool trans_ldc(DisasContext *ctx, arg_ldst *a) 2858 { 2859 MemOp mop = MO_TE | MO_ALIGN | a->size; 2860 TCGv_reg zero, dest, ofs; 2861 TCGv_tl addr; 2862 2863 nullify_over(ctx); 2864 2865 if (a->m) { 2866 /* Base register modification. Make sure if RT == RB, 2867 we see the result of the load. */ 2868 dest = get_temp(ctx); 2869 } else { 2870 dest = dest_gpr(ctx, a->t); 2871 } 2872 2873 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0, 2874 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX); 2875 2876 /* 2877 * For hppa1.1, LDCW is undefined unless aligned mod 16. 2878 * However actual hardware succeeds with aligned mod 4. 2879 * Detect this case and log a GUEST_ERROR. 2880 * 2881 * TODO: HPPA64 relaxes the over-alignment requirement 2882 * with the ,co completer. 2883 */ 2884 gen_helper_ldc_check(addr); 2885 2886 zero = tcg_constant_reg(0); 2887 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop); 2888 2889 if (a->m) { 2890 save_gpr(ctx, a->b, ofs); 2891 } 2892 save_gpr(ctx, a->t, dest); 2893 2894 return nullify_end(ctx); 2895 } 2896 2897 static bool trans_stby(DisasContext *ctx, arg_stby *a) 2898 { 2899 TCGv_reg ofs, val; 2900 TCGv_tl addr; 2901 2902 nullify_over(ctx); 2903 2904 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 2905 ctx->mmu_idx == MMU_PHYS_IDX); 2906 val = load_gpr(ctx, a->r); 2907 if (a->a) { 2908 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 2909 gen_helper_stby_e_parallel(cpu_env, addr, val); 2910 } else { 2911 gen_helper_stby_e(cpu_env, addr, val); 2912 } 2913 } else { 2914 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 2915 gen_helper_stby_b_parallel(cpu_env, addr, val); 2916 } else { 2917 gen_helper_stby_b(cpu_env, addr, val); 2918 } 2919 } 2920 if (a->m) { 2921 tcg_gen_andi_reg(ofs, ofs, ~3); 2922 save_gpr(ctx, a->b, ofs); 2923 } 2924 2925 return nullify_end(ctx); 2926 } 2927 2928 static bool trans_lda(DisasContext *ctx, arg_ldst *a) 2929 { 2930 int hold_mmu_idx = ctx->mmu_idx; 2931 2932 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2933 ctx->mmu_idx = MMU_PHYS_IDX; 2934 trans_ld(ctx, a); 2935 ctx->mmu_idx = hold_mmu_idx; 2936 return true; 2937 } 2938 2939 static bool trans_sta(DisasContext *ctx, arg_ldst *a) 2940 { 2941 int hold_mmu_idx = ctx->mmu_idx; 2942 2943 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2944 ctx->mmu_idx = MMU_PHYS_IDX; 2945 trans_st(ctx, a); 2946 ctx->mmu_idx = hold_mmu_idx; 2947 return true; 2948 } 2949 2950 static bool trans_ldil(DisasContext *ctx, arg_ldil *a) 2951 { 2952 TCGv_reg tcg_rt = dest_gpr(ctx, a->t); 2953 2954 tcg_gen_movi_reg(tcg_rt, a->i); 2955 save_gpr(ctx, a->t, tcg_rt); 2956 cond_free(&ctx->null_cond); 2957 return true; 2958 } 2959 2960 static bool trans_addil(DisasContext *ctx, arg_addil *a) 2961 { 2962 TCGv_reg tcg_rt = load_gpr(ctx, a->r); 2963 TCGv_reg tcg_r1 = dest_gpr(ctx, 1); 2964 2965 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i); 2966 save_gpr(ctx, 1, tcg_r1); 2967 cond_free(&ctx->null_cond); 2968 return true; 2969 } 2970 2971 static bool trans_ldo(DisasContext *ctx, arg_ldo *a) 2972 { 2973 TCGv_reg tcg_rt = dest_gpr(ctx, a->t); 2974 2975 /* Special case rb == 0, for the LDI pseudo-op. 2976 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */ 2977 if (a->b == 0) { 2978 tcg_gen_movi_reg(tcg_rt, a->i); 2979 } else { 2980 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i); 2981 } 2982 save_gpr(ctx, a->t, tcg_rt); 2983 cond_free(&ctx->null_cond); 2984 return true; 2985 } 2986 2987 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1, 2988 unsigned c, unsigned f, unsigned n, int disp) 2989 { 2990 TCGv_reg dest, in2, sv; 2991 DisasCond cond; 2992 2993 in2 = load_gpr(ctx, r); 2994 dest = get_temp(ctx); 2995 2996 tcg_gen_sub_reg(dest, in1, in2); 2997 2998 sv = NULL; 2999 if (cond_need_sv(c)) { 3000 sv = do_sub_sv(ctx, dest, in1, in2); 3001 } 3002 3003 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv); 3004 return do_cbranch(ctx, disp, n, &cond); 3005 } 3006 3007 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a) 3008 { 3009 nullify_over(ctx); 3010 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); 3011 } 3012 3013 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a) 3014 { 3015 nullify_over(ctx); 3016 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp); 3017 } 3018 3019 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1, 3020 unsigned c, unsigned f, unsigned n, int disp) 3021 { 3022 TCGv_reg dest, in2, sv, cb_msb; 3023 DisasCond cond; 3024 3025 in2 = load_gpr(ctx, r); 3026 dest = tcg_temp_new(); 3027 sv = NULL; 3028 cb_msb = NULL; 3029 3030 if (cond_need_cb(c)) { 3031 cb_msb = get_temp(ctx); 3032 tcg_gen_movi_reg(cb_msb, 0); 3033 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb); 3034 } else { 3035 tcg_gen_add_reg(dest, in1, in2); 3036 } 3037 if (cond_need_sv(c)) { 3038 sv = do_add_sv(ctx, dest, in1, in2); 3039 } 3040 3041 cond = do_cond(c * 2 + f, dest, cb_msb, sv); 3042 save_gpr(ctx, r, dest); 3043 return do_cbranch(ctx, disp, n, &cond); 3044 } 3045 3046 static bool trans_addb(DisasContext *ctx, arg_addb *a) 3047 { 3048 nullify_over(ctx); 3049 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); 3050 } 3051 3052 static bool trans_addbi(DisasContext *ctx, arg_addbi *a) 3053 { 3054 nullify_over(ctx); 3055 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp); 3056 } 3057 3058 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) 3059 { 3060 TCGv_reg tmp, tcg_r; 3061 DisasCond cond; 3062 3063 nullify_over(ctx); 3064 3065 tmp = tcg_temp_new(); 3066 tcg_r = load_gpr(ctx, a->r); 3067 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar); 3068 3069 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); 3070 return do_cbranch(ctx, a->disp, a->n, &cond); 3071 } 3072 3073 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) 3074 { 3075 TCGv_reg tmp, tcg_r; 3076 DisasCond cond; 3077 3078 nullify_over(ctx); 3079 3080 tmp = tcg_temp_new(); 3081 tcg_r = load_gpr(ctx, a->r); 3082 tcg_gen_shli_reg(tmp, tcg_r, a->p); 3083 3084 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); 3085 return do_cbranch(ctx, a->disp, a->n, &cond); 3086 } 3087 3088 static bool trans_movb(DisasContext *ctx, arg_movb *a) 3089 { 3090 TCGv_reg dest; 3091 DisasCond cond; 3092 3093 nullify_over(ctx); 3094 3095 dest = dest_gpr(ctx, a->r2); 3096 if (a->r1 == 0) { 3097 tcg_gen_movi_reg(dest, 0); 3098 } else { 3099 tcg_gen_mov_reg(dest, cpu_gr[a->r1]); 3100 } 3101 3102 cond = do_sed_cond(a->c, dest); 3103 return do_cbranch(ctx, a->disp, a->n, &cond); 3104 } 3105 3106 static bool trans_movbi(DisasContext *ctx, arg_movbi *a) 3107 { 3108 TCGv_reg dest; 3109 DisasCond cond; 3110 3111 nullify_over(ctx); 3112 3113 dest = dest_gpr(ctx, a->r); 3114 tcg_gen_movi_reg(dest, a->i); 3115 3116 cond = do_sed_cond(a->c, dest); 3117 return do_cbranch(ctx, a->disp, a->n, &cond); 3118 } 3119 3120 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a) 3121 { 3122 TCGv_reg dest; 3123 3124 if (a->c) { 3125 nullify_over(ctx); 3126 } 3127 3128 dest = dest_gpr(ctx, a->t); 3129 if (a->r1 == 0) { 3130 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2)); 3131 tcg_gen_shr_reg(dest, dest, cpu_sar); 3132 } else if (a->r1 == a->r2) { 3133 TCGv_i32 t32 = tcg_temp_new_i32(); 3134 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2)); 3135 tcg_gen_rotr_i32(t32, t32, cpu_sar); 3136 tcg_gen_extu_i32_reg(dest, t32); 3137 } else { 3138 TCGv_i64 t = tcg_temp_new_i64(); 3139 TCGv_i64 s = tcg_temp_new_i64(); 3140 3141 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1)); 3142 tcg_gen_extu_reg_i64(s, cpu_sar); 3143 tcg_gen_shr_i64(t, t, s); 3144 tcg_gen_trunc_i64_reg(dest, t); 3145 } 3146 save_gpr(ctx, a->t, dest); 3147 3148 /* Install the new nullification. */ 3149 cond_free(&ctx->null_cond); 3150 if (a->c) { 3151 ctx->null_cond = do_sed_cond(a->c, dest); 3152 } 3153 return nullify_end(ctx); 3154 } 3155 3156 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a) 3157 { 3158 unsigned sa = 31 - a->cpos; 3159 TCGv_reg dest, t2; 3160 3161 if (a->c) { 3162 nullify_over(ctx); 3163 } 3164 3165 dest = dest_gpr(ctx, a->t); 3166 t2 = load_gpr(ctx, a->r2); 3167 if (a->r1 == 0) { 3168 tcg_gen_extract_reg(dest, t2, sa, 32 - sa); 3169 } else if (TARGET_REGISTER_BITS == 32) { 3170 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa); 3171 } else if (a->r1 == a->r2) { 3172 TCGv_i32 t32 = tcg_temp_new_i32(); 3173 tcg_gen_trunc_reg_i32(t32, t2); 3174 tcg_gen_rotri_i32(t32, t32, sa); 3175 tcg_gen_extu_i32_reg(dest, t32); 3176 } else { 3177 TCGv_i64 t64 = tcg_temp_new_i64(); 3178 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]); 3179 tcg_gen_shri_i64(t64, t64, sa); 3180 tcg_gen_trunc_i64_reg(dest, t64); 3181 } 3182 save_gpr(ctx, a->t, dest); 3183 3184 /* Install the new nullification. */ 3185 cond_free(&ctx->null_cond); 3186 if (a->c) { 3187 ctx->null_cond = do_sed_cond(a->c, dest); 3188 } 3189 return nullify_end(ctx); 3190 } 3191 3192 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a) 3193 { 3194 unsigned len = 32 - a->clen; 3195 TCGv_reg dest, src, tmp; 3196 3197 if (a->c) { 3198 nullify_over(ctx); 3199 } 3200 3201 dest = dest_gpr(ctx, a->t); 3202 src = load_gpr(ctx, a->r); 3203 tmp = tcg_temp_new(); 3204 3205 /* Recall that SAR is using big-endian bit numbering. */ 3206 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1); 3207 if (a->se) { 3208 tcg_gen_sar_reg(dest, src, tmp); 3209 tcg_gen_sextract_reg(dest, dest, 0, len); 3210 } else { 3211 tcg_gen_shr_reg(dest, src, tmp); 3212 tcg_gen_extract_reg(dest, dest, 0, len); 3213 } 3214 save_gpr(ctx, a->t, dest); 3215 3216 /* Install the new nullification. */ 3217 cond_free(&ctx->null_cond); 3218 if (a->c) { 3219 ctx->null_cond = do_sed_cond(a->c, dest); 3220 } 3221 return nullify_end(ctx); 3222 } 3223 3224 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a) 3225 { 3226 unsigned len = 32 - a->clen; 3227 unsigned cpos = 31 - a->pos; 3228 TCGv_reg dest, src; 3229 3230 if (a->c) { 3231 nullify_over(ctx); 3232 } 3233 3234 dest = dest_gpr(ctx, a->t); 3235 src = load_gpr(ctx, a->r); 3236 if (a->se) { 3237 tcg_gen_sextract_reg(dest, src, cpos, len); 3238 } else { 3239 tcg_gen_extract_reg(dest, src, cpos, len); 3240 } 3241 save_gpr(ctx, a->t, dest); 3242 3243 /* Install the new nullification. */ 3244 cond_free(&ctx->null_cond); 3245 if (a->c) { 3246 ctx->null_cond = do_sed_cond(a->c, dest); 3247 } 3248 return nullify_end(ctx); 3249 } 3250 3251 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a) 3252 { 3253 unsigned len = 32 - a->clen; 3254 target_sreg mask0, mask1; 3255 TCGv_reg dest; 3256 3257 if (a->c) { 3258 nullify_over(ctx); 3259 } 3260 if (a->cpos + len > 32) { 3261 len = 32 - a->cpos; 3262 } 3263 3264 dest = dest_gpr(ctx, a->t); 3265 mask0 = deposit64(0, a->cpos, len, a->i); 3266 mask1 = deposit64(-1, a->cpos, len, a->i); 3267 3268 if (a->nz) { 3269 TCGv_reg src = load_gpr(ctx, a->t); 3270 if (mask1 != -1) { 3271 tcg_gen_andi_reg(dest, src, mask1); 3272 src = dest; 3273 } 3274 tcg_gen_ori_reg(dest, src, mask0); 3275 } else { 3276 tcg_gen_movi_reg(dest, mask0); 3277 } 3278 save_gpr(ctx, a->t, dest); 3279 3280 /* Install the new nullification. */ 3281 cond_free(&ctx->null_cond); 3282 if (a->c) { 3283 ctx->null_cond = do_sed_cond(a->c, dest); 3284 } 3285 return nullify_end(ctx); 3286 } 3287 3288 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a) 3289 { 3290 unsigned rs = a->nz ? a->t : 0; 3291 unsigned len = 32 - a->clen; 3292 TCGv_reg dest, val; 3293 3294 if (a->c) { 3295 nullify_over(ctx); 3296 } 3297 if (a->cpos + len > 32) { 3298 len = 32 - a->cpos; 3299 } 3300 3301 dest = dest_gpr(ctx, a->t); 3302 val = load_gpr(ctx, a->r); 3303 if (rs == 0) { 3304 tcg_gen_deposit_z_reg(dest, val, a->cpos, len); 3305 } else { 3306 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len); 3307 } 3308 save_gpr(ctx, a->t, dest); 3309 3310 /* Install the new nullification. */ 3311 cond_free(&ctx->null_cond); 3312 if (a->c) { 3313 ctx->null_cond = do_sed_cond(a->c, dest); 3314 } 3315 return nullify_end(ctx); 3316 } 3317 3318 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c, 3319 unsigned nz, unsigned clen, TCGv_reg val) 3320 { 3321 unsigned rs = nz ? rt : 0; 3322 unsigned len = 32 - clen; 3323 TCGv_reg mask, tmp, shift, dest; 3324 unsigned msb = 1U << (len - 1); 3325 3326 dest = dest_gpr(ctx, rt); 3327 shift = tcg_temp_new(); 3328 tmp = tcg_temp_new(); 3329 3330 /* Convert big-endian bit numbering in SAR to left-shift. */ 3331 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1); 3332 3333 mask = tcg_const_reg(msb + (msb - 1)); 3334 tcg_gen_and_reg(tmp, val, mask); 3335 if (rs) { 3336 tcg_gen_shl_reg(mask, mask, shift); 3337 tcg_gen_shl_reg(tmp, tmp, shift); 3338 tcg_gen_andc_reg(dest, cpu_gr[rs], mask); 3339 tcg_gen_or_reg(dest, dest, tmp); 3340 } else { 3341 tcg_gen_shl_reg(dest, tmp, shift); 3342 } 3343 save_gpr(ctx, rt, dest); 3344 3345 /* Install the new nullification. */ 3346 cond_free(&ctx->null_cond); 3347 if (c) { 3348 ctx->null_cond = do_sed_cond(c, dest); 3349 } 3350 return nullify_end(ctx); 3351 } 3352 3353 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a) 3354 { 3355 if (a->c) { 3356 nullify_over(ctx); 3357 } 3358 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r)); 3359 } 3360 3361 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a) 3362 { 3363 if (a->c) { 3364 nullify_over(ctx); 3365 } 3366 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i)); 3367 } 3368 3369 static bool trans_be(DisasContext *ctx, arg_be *a) 3370 { 3371 TCGv_reg tmp; 3372 3373 #ifdef CONFIG_USER_ONLY 3374 /* ??? It seems like there should be a good way of using 3375 "be disp(sr2, r0)", the canonical gateway entry mechanism 3376 to our advantage. But that appears to be inconvenient to 3377 manage along side branch delay slots. Therefore we handle 3378 entry into the gateway page via absolute address. */ 3379 /* Since we don't implement spaces, just branch. Do notice the special 3380 case of "be disp(*,r0)" using a direct branch to disp, so that we can 3381 goto_tb to the TB containing the syscall. */ 3382 if (a->b == 0) { 3383 return do_dbranch(ctx, a->disp, a->l, a->n); 3384 } 3385 #else 3386 nullify_over(ctx); 3387 #endif 3388 3389 tmp = get_temp(ctx); 3390 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp); 3391 tmp = do_ibranch_priv(ctx, tmp); 3392 3393 #ifdef CONFIG_USER_ONLY 3394 return do_ibranch(ctx, tmp, a->l, a->n); 3395 #else 3396 TCGv_i64 new_spc = tcg_temp_new_i64(); 3397 3398 load_spr(ctx, new_spc, a->sp); 3399 if (a->l) { 3400 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var); 3401 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f); 3402 } 3403 if (a->n && use_nullify_skip(ctx)) { 3404 tcg_gen_mov_reg(cpu_iaoq_f, tmp); 3405 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4); 3406 tcg_gen_mov_i64(cpu_iasq_f, new_spc); 3407 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f); 3408 } else { 3409 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 3410 if (ctx->iaoq_b == -1) { 3411 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 3412 } 3413 tcg_gen_mov_reg(cpu_iaoq_b, tmp); 3414 tcg_gen_mov_i64(cpu_iasq_b, new_spc); 3415 nullify_set(ctx, a->n); 3416 } 3417 tcg_gen_lookup_and_goto_ptr(); 3418 ctx->base.is_jmp = DISAS_NORETURN; 3419 return nullify_end(ctx); 3420 #endif 3421 } 3422 3423 static bool trans_bl(DisasContext *ctx, arg_bl *a) 3424 { 3425 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n); 3426 } 3427 3428 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) 3429 { 3430 target_ureg dest = iaoq_dest(ctx, a->disp); 3431 3432 nullify_over(ctx); 3433 3434 /* Make sure the caller hasn't done something weird with the queue. 3435 * ??? This is not quite the same as the PSW[B] bit, which would be 3436 * expensive to track. Real hardware will trap for 3437 * b gateway 3438 * b gateway+4 (in delay slot of first branch) 3439 * However, checking for a non-sequential instruction queue *will* 3440 * diagnose the security hole 3441 * b gateway 3442 * b evil 3443 * in which instructions at evil would run with increased privs. 3444 */ 3445 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) { 3446 return gen_illegal(ctx); 3447 } 3448 3449 #ifndef CONFIG_USER_ONLY 3450 if (ctx->tb_flags & PSW_C) { 3451 CPUHPPAState *env = ctx->cs->env_ptr; 3452 int type = hppa_artype_for_page(env, ctx->base.pc_next); 3453 /* If we could not find a TLB entry, then we need to generate an 3454 ITLB miss exception so the kernel will provide it. 3455 The resulting TLB fill operation will invalidate this TB and 3456 we will re-translate, at which point we *will* be able to find 3457 the TLB entry and determine if this is in fact a gateway page. */ 3458 if (type < 0) { 3459 gen_excp(ctx, EXCP_ITLB_MISS); 3460 return true; 3461 } 3462 /* No change for non-gateway pages or for priv decrease. */ 3463 if (type >= 4 && type - 4 < ctx->privilege) { 3464 dest = deposit32(dest, 0, 2, type - 4); 3465 } 3466 } else { 3467 dest &= -4; /* priv = 0 */ 3468 } 3469 #endif 3470 3471 if (a->l) { 3472 TCGv_reg tmp = dest_gpr(ctx, a->l); 3473 if (ctx->privilege < 3) { 3474 tcg_gen_andi_reg(tmp, tmp, -4); 3475 } 3476 tcg_gen_ori_reg(tmp, tmp, ctx->privilege); 3477 save_gpr(ctx, a->l, tmp); 3478 } 3479 3480 return do_dbranch(ctx, dest, 0, a->n); 3481 } 3482 3483 static bool trans_blr(DisasContext *ctx, arg_blr *a) 3484 { 3485 if (a->x) { 3486 TCGv_reg tmp = get_temp(ctx); 3487 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3); 3488 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8); 3489 /* The computation here never changes privilege level. */ 3490 return do_ibranch(ctx, tmp, a->l, a->n); 3491 } else { 3492 /* BLR R0,RX is a good way to load PC+8 into RX. */ 3493 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n); 3494 } 3495 } 3496 3497 static bool trans_bv(DisasContext *ctx, arg_bv *a) 3498 { 3499 TCGv_reg dest; 3500 3501 if (a->x == 0) { 3502 dest = load_gpr(ctx, a->b); 3503 } else { 3504 dest = get_temp(ctx); 3505 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3); 3506 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b)); 3507 } 3508 dest = do_ibranch_priv(ctx, dest); 3509 return do_ibranch(ctx, dest, 0, a->n); 3510 } 3511 3512 static bool trans_bve(DisasContext *ctx, arg_bve *a) 3513 { 3514 TCGv_reg dest; 3515 3516 #ifdef CONFIG_USER_ONLY 3517 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); 3518 return do_ibranch(ctx, dest, a->l, a->n); 3519 #else 3520 nullify_over(ctx); 3521 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); 3522 3523 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 3524 if (ctx->iaoq_b == -1) { 3525 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 3526 } 3527 copy_iaoq_entry(cpu_iaoq_b, -1, dest); 3528 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest)); 3529 if (a->l) { 3530 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var); 3531 } 3532 nullify_set(ctx, a->n); 3533 tcg_gen_lookup_and_goto_ptr(); 3534 ctx->base.is_jmp = DISAS_NORETURN; 3535 return nullify_end(ctx); 3536 #endif 3537 } 3538 3539 /* 3540 * Float class 0 3541 */ 3542 3543 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3544 { 3545 tcg_gen_mov_i32(dst, src); 3546 } 3547 3548 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a) 3549 { 3550 nullify_over(ctx); 3551 #if TARGET_REGISTER_BITS == 64 3552 save_frd(0, tcg_const_i64(0x13080000000000ULL)); /* PA8700 (PCX-W2) */ 3553 #else 3554 save_frd(0, tcg_const_i64(0x0f080000000000ULL)); /* PA7300LC (PCX-L2) */ 3555 #endif 3556 return nullify_end(ctx); 3557 } 3558 3559 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a) 3560 { 3561 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f); 3562 } 3563 3564 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3565 { 3566 tcg_gen_mov_i64(dst, src); 3567 } 3568 3569 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a) 3570 { 3571 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d); 3572 } 3573 3574 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3575 { 3576 tcg_gen_andi_i32(dst, src, INT32_MAX); 3577 } 3578 3579 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a) 3580 { 3581 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f); 3582 } 3583 3584 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3585 { 3586 tcg_gen_andi_i64(dst, src, INT64_MAX); 3587 } 3588 3589 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a) 3590 { 3591 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d); 3592 } 3593 3594 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a) 3595 { 3596 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s); 3597 } 3598 3599 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a) 3600 { 3601 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d); 3602 } 3603 3604 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a) 3605 { 3606 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s); 3607 } 3608 3609 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a) 3610 { 3611 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d); 3612 } 3613 3614 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3615 { 3616 tcg_gen_xori_i32(dst, src, INT32_MIN); 3617 } 3618 3619 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a) 3620 { 3621 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f); 3622 } 3623 3624 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3625 { 3626 tcg_gen_xori_i64(dst, src, INT64_MIN); 3627 } 3628 3629 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a) 3630 { 3631 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d); 3632 } 3633 3634 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3635 { 3636 tcg_gen_ori_i32(dst, src, INT32_MIN); 3637 } 3638 3639 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a) 3640 { 3641 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f); 3642 } 3643 3644 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3645 { 3646 tcg_gen_ori_i64(dst, src, INT64_MIN); 3647 } 3648 3649 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a) 3650 { 3651 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d); 3652 } 3653 3654 /* 3655 * Float class 1 3656 */ 3657 3658 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a) 3659 { 3660 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s); 3661 } 3662 3663 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a) 3664 { 3665 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d); 3666 } 3667 3668 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a) 3669 { 3670 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s); 3671 } 3672 3673 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a) 3674 { 3675 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s); 3676 } 3677 3678 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a) 3679 { 3680 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d); 3681 } 3682 3683 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a) 3684 { 3685 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d); 3686 } 3687 3688 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a) 3689 { 3690 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w); 3691 } 3692 3693 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a) 3694 { 3695 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w); 3696 } 3697 3698 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a) 3699 { 3700 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw); 3701 } 3702 3703 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a) 3704 { 3705 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw); 3706 } 3707 3708 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a) 3709 { 3710 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w); 3711 } 3712 3713 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a) 3714 { 3715 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w); 3716 } 3717 3718 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a) 3719 { 3720 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw); 3721 } 3722 3723 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a) 3724 { 3725 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw); 3726 } 3727 3728 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a) 3729 { 3730 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s); 3731 } 3732 3733 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a) 3734 { 3735 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s); 3736 } 3737 3738 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a) 3739 { 3740 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d); 3741 } 3742 3743 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a) 3744 { 3745 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d); 3746 } 3747 3748 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a) 3749 { 3750 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw); 3751 } 3752 3753 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a) 3754 { 3755 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw); 3756 } 3757 3758 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a) 3759 { 3760 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw); 3761 } 3762 3763 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a) 3764 { 3765 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw); 3766 } 3767 3768 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a) 3769 { 3770 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw); 3771 } 3772 3773 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a) 3774 { 3775 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw); 3776 } 3777 3778 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a) 3779 { 3780 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw); 3781 } 3782 3783 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a) 3784 { 3785 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw); 3786 } 3787 3788 /* 3789 * Float class 2 3790 */ 3791 3792 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a) 3793 { 3794 TCGv_i32 ta, tb, tc, ty; 3795 3796 nullify_over(ctx); 3797 3798 ta = load_frw0_i32(a->r1); 3799 tb = load_frw0_i32(a->r2); 3800 ty = tcg_constant_i32(a->y); 3801 tc = tcg_constant_i32(a->c); 3802 3803 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc); 3804 3805 return nullify_end(ctx); 3806 } 3807 3808 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a) 3809 { 3810 TCGv_i64 ta, tb; 3811 TCGv_i32 tc, ty; 3812 3813 nullify_over(ctx); 3814 3815 ta = load_frd0(a->r1); 3816 tb = load_frd0(a->r2); 3817 ty = tcg_constant_i32(a->y); 3818 tc = tcg_constant_i32(a->c); 3819 3820 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc); 3821 3822 return nullify_end(ctx); 3823 } 3824 3825 static bool trans_ftest(DisasContext *ctx, arg_ftest *a) 3826 { 3827 TCGv_reg t; 3828 3829 nullify_over(ctx); 3830 3831 t = get_temp(ctx); 3832 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow)); 3833 3834 if (a->y == 1) { 3835 int mask; 3836 bool inv = false; 3837 3838 switch (a->c) { 3839 case 0: /* simple */ 3840 tcg_gen_andi_reg(t, t, 0x4000000); 3841 ctx->null_cond = cond_make_0(TCG_COND_NE, t); 3842 goto done; 3843 case 2: /* rej */ 3844 inv = true; 3845 /* fallthru */ 3846 case 1: /* acc */ 3847 mask = 0x43ff800; 3848 break; 3849 case 6: /* rej8 */ 3850 inv = true; 3851 /* fallthru */ 3852 case 5: /* acc8 */ 3853 mask = 0x43f8000; 3854 break; 3855 case 9: /* acc6 */ 3856 mask = 0x43e0000; 3857 break; 3858 case 13: /* acc4 */ 3859 mask = 0x4380000; 3860 break; 3861 case 17: /* acc2 */ 3862 mask = 0x4200000; 3863 break; 3864 default: 3865 gen_illegal(ctx); 3866 return true; 3867 } 3868 if (inv) { 3869 TCGv_reg c = load_const(ctx, mask); 3870 tcg_gen_or_reg(t, t, c); 3871 ctx->null_cond = cond_make(TCG_COND_EQ, t, c); 3872 } else { 3873 tcg_gen_andi_reg(t, t, mask); 3874 ctx->null_cond = cond_make_0(TCG_COND_EQ, t); 3875 } 3876 } else { 3877 unsigned cbit = (a->y ^ 1) - 1; 3878 3879 tcg_gen_extract_reg(t, t, 21 - cbit, 1); 3880 ctx->null_cond = cond_make_0(TCG_COND_NE, t); 3881 } 3882 3883 done: 3884 return nullify_end(ctx); 3885 } 3886 3887 /* 3888 * Float class 2 3889 */ 3890 3891 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a) 3892 { 3893 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s); 3894 } 3895 3896 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a) 3897 { 3898 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d); 3899 } 3900 3901 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a) 3902 { 3903 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s); 3904 } 3905 3906 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a) 3907 { 3908 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d); 3909 } 3910 3911 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a) 3912 { 3913 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s); 3914 } 3915 3916 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a) 3917 { 3918 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d); 3919 } 3920 3921 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a) 3922 { 3923 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s); 3924 } 3925 3926 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a) 3927 { 3928 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d); 3929 } 3930 3931 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a) 3932 { 3933 TCGv_i64 x, y; 3934 3935 nullify_over(ctx); 3936 3937 x = load_frw0_i64(a->r1); 3938 y = load_frw0_i64(a->r2); 3939 tcg_gen_mul_i64(x, x, y); 3940 save_frd(a->t, x); 3941 3942 return nullify_end(ctx); 3943 } 3944 3945 /* Convert the fmpyadd single-precision register encodings to standard. */ 3946 static inline int fmpyadd_s_reg(unsigned r) 3947 { 3948 return (r & 16) * 2 + 16 + (r & 15); 3949 } 3950 3951 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub) 3952 { 3953 int tm = fmpyadd_s_reg(a->tm); 3954 int ra = fmpyadd_s_reg(a->ra); 3955 int ta = fmpyadd_s_reg(a->ta); 3956 int rm2 = fmpyadd_s_reg(a->rm2); 3957 int rm1 = fmpyadd_s_reg(a->rm1); 3958 3959 nullify_over(ctx); 3960 3961 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s); 3962 do_fop_weww(ctx, ta, ta, ra, 3963 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s); 3964 3965 return nullify_end(ctx); 3966 } 3967 3968 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a) 3969 { 3970 return do_fmpyadd_s(ctx, a, false); 3971 } 3972 3973 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a) 3974 { 3975 return do_fmpyadd_s(ctx, a, true); 3976 } 3977 3978 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub) 3979 { 3980 nullify_over(ctx); 3981 3982 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d); 3983 do_fop_dedd(ctx, a->ta, a->ta, a->ra, 3984 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d); 3985 3986 return nullify_end(ctx); 3987 } 3988 3989 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a) 3990 { 3991 return do_fmpyadd_d(ctx, a, false); 3992 } 3993 3994 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a) 3995 { 3996 return do_fmpyadd_d(ctx, a, true); 3997 } 3998 3999 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a) 4000 { 4001 TCGv_i32 x, y, z; 4002 4003 nullify_over(ctx); 4004 x = load_frw0_i32(a->rm1); 4005 y = load_frw0_i32(a->rm2); 4006 z = load_frw0_i32(a->ra3); 4007 4008 if (a->neg) { 4009 gen_helper_fmpynfadd_s(x, cpu_env, x, y, z); 4010 } else { 4011 gen_helper_fmpyfadd_s(x, cpu_env, x, y, z); 4012 } 4013 4014 save_frw_i32(a->t, x); 4015 return nullify_end(ctx); 4016 } 4017 4018 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a) 4019 { 4020 TCGv_i64 x, y, z; 4021 4022 nullify_over(ctx); 4023 x = load_frd0(a->rm1); 4024 y = load_frd0(a->rm2); 4025 z = load_frd0(a->ra3); 4026 4027 if (a->neg) { 4028 gen_helper_fmpynfadd_d(x, cpu_env, x, y, z); 4029 } else { 4030 gen_helper_fmpyfadd_d(x, cpu_env, x, y, z); 4031 } 4032 4033 save_frd(a->t, x); 4034 return nullify_end(ctx); 4035 } 4036 4037 static bool trans_diag(DisasContext *ctx, arg_diag *a) 4038 { 4039 qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n"); 4040 cond_free(&ctx->null_cond); 4041 return true; 4042 } 4043 4044 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) 4045 { 4046 DisasContext *ctx = container_of(dcbase, DisasContext, base); 4047 int bound; 4048 4049 ctx->cs = cs; 4050 ctx->tb_flags = ctx->base.tb->flags; 4051 4052 #ifdef CONFIG_USER_ONLY 4053 ctx->privilege = MMU_USER_IDX; 4054 ctx->mmu_idx = MMU_USER_IDX; 4055 ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX; 4056 ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX; 4057 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); 4058 #else 4059 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3; 4060 ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX); 4061 4062 /* Recover the IAOQ values from the GVA + PRIV. */ 4063 uint64_t cs_base = ctx->base.tb->cs_base; 4064 uint64_t iasq_f = cs_base & ~0xffffffffull; 4065 int32_t diff = cs_base; 4066 4067 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege; 4068 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1); 4069 #endif 4070 ctx->iaoq_n = -1; 4071 ctx->iaoq_n_var = NULL; 4072 4073 /* Bound the number of instructions by those left on the page. */ 4074 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; 4075 ctx->base.max_insns = MIN(ctx->base.max_insns, bound); 4076 4077 ctx->ntempr = 0; 4078 ctx->ntempl = 0; 4079 memset(ctx->tempr, 0, sizeof(ctx->tempr)); 4080 memset(ctx->templ, 0, sizeof(ctx->templ)); 4081 } 4082 4083 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs) 4084 { 4085 DisasContext *ctx = container_of(dcbase, DisasContext, base); 4086 4087 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */ 4088 ctx->null_cond = cond_make_f(); 4089 ctx->psw_n_nonzero = false; 4090 if (ctx->tb_flags & PSW_N) { 4091 ctx->null_cond.c = TCG_COND_ALWAYS; 4092 ctx->psw_n_nonzero = true; 4093 } 4094 ctx->null_lab = NULL; 4095 } 4096 4097 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) 4098 { 4099 DisasContext *ctx = container_of(dcbase, DisasContext, base); 4100 4101 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b); 4102 } 4103 4104 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) 4105 { 4106 DisasContext *ctx = container_of(dcbase, DisasContext, base); 4107 CPUHPPAState *env = cs->env_ptr; 4108 DisasJumpType ret; 4109 int i, n; 4110 4111 /* Execute one insn. */ 4112 #ifdef CONFIG_USER_ONLY 4113 if (ctx->base.pc_next < TARGET_PAGE_SIZE) { 4114 do_page_zero(ctx); 4115 ret = ctx->base.is_jmp; 4116 assert(ret != DISAS_NEXT); 4117 } else 4118 #endif 4119 { 4120 /* Always fetch the insn, even if nullified, so that we check 4121 the page permissions for execute. */ 4122 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next); 4123 4124 /* Set up the IA queue for the next insn. 4125 This will be overwritten by a branch. */ 4126 if (ctx->iaoq_b == -1) { 4127 ctx->iaoq_n = -1; 4128 ctx->iaoq_n_var = get_temp(ctx); 4129 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4); 4130 } else { 4131 ctx->iaoq_n = ctx->iaoq_b + 4; 4132 ctx->iaoq_n_var = NULL; 4133 } 4134 4135 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) { 4136 ctx->null_cond.c = TCG_COND_NEVER; 4137 ret = DISAS_NEXT; 4138 } else { 4139 ctx->insn = insn; 4140 if (!decode(ctx, insn)) { 4141 gen_illegal(ctx); 4142 } 4143 ret = ctx->base.is_jmp; 4144 assert(ctx->null_lab == NULL); 4145 } 4146 } 4147 4148 /* Forget any temporaries allocated. */ 4149 for (i = 0, n = ctx->ntempr; i < n; ++i) { 4150 ctx->tempr[i] = NULL; 4151 } 4152 for (i = 0, n = ctx->ntempl; i < n; ++i) { 4153 ctx->templ[i] = NULL; 4154 } 4155 ctx->ntempr = 0; 4156 ctx->ntempl = 0; 4157 4158 /* Advance the insn queue. Note that this check also detects 4159 a priority change within the instruction queue. */ 4160 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) { 4161 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1 4162 && use_goto_tb(ctx, ctx->iaoq_b) 4163 && (ctx->null_cond.c == TCG_COND_NEVER 4164 || ctx->null_cond.c == TCG_COND_ALWAYS)) { 4165 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS); 4166 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n); 4167 ctx->base.is_jmp = ret = DISAS_NORETURN; 4168 } else { 4169 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE; 4170 } 4171 } 4172 ctx->iaoq_f = ctx->iaoq_b; 4173 ctx->iaoq_b = ctx->iaoq_n; 4174 ctx->base.pc_next += 4; 4175 4176 switch (ret) { 4177 case DISAS_NORETURN: 4178 case DISAS_IAQ_N_UPDATED: 4179 break; 4180 4181 case DISAS_NEXT: 4182 case DISAS_IAQ_N_STALE: 4183 case DISAS_IAQ_N_STALE_EXIT: 4184 if (ctx->iaoq_f == -1) { 4185 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b); 4186 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); 4187 #ifndef CONFIG_USER_ONLY 4188 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 4189 #endif 4190 nullify_save(ctx); 4191 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT 4192 ? DISAS_EXIT 4193 : DISAS_IAQ_N_UPDATED); 4194 } else if (ctx->iaoq_b == -1) { 4195 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var); 4196 } 4197 break; 4198 4199 default: 4200 g_assert_not_reached(); 4201 } 4202 } 4203 4204 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) 4205 { 4206 DisasContext *ctx = container_of(dcbase, DisasContext, base); 4207 DisasJumpType is_jmp = ctx->base.is_jmp; 4208 4209 switch (is_jmp) { 4210 case DISAS_NORETURN: 4211 break; 4212 case DISAS_TOO_MANY: 4213 case DISAS_IAQ_N_STALE: 4214 case DISAS_IAQ_N_STALE_EXIT: 4215 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); 4216 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); 4217 nullify_save(ctx); 4218 /* FALLTHRU */ 4219 case DISAS_IAQ_N_UPDATED: 4220 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) { 4221 tcg_gen_lookup_and_goto_ptr(); 4222 break; 4223 } 4224 /* FALLTHRU */ 4225 case DISAS_EXIT: 4226 tcg_gen_exit_tb(NULL, 0); 4227 break; 4228 default: 4229 g_assert_not_reached(); 4230 } 4231 } 4232 4233 static void hppa_tr_disas_log(const DisasContextBase *dcbase, 4234 CPUState *cs, FILE *logfile) 4235 { 4236 target_ulong pc = dcbase->pc_first; 4237 4238 #ifdef CONFIG_USER_ONLY 4239 switch (pc) { 4240 case 0x00: 4241 fprintf(logfile, "IN:\n0x00000000: (null)\n"); 4242 return; 4243 case 0xb0: 4244 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n"); 4245 return; 4246 case 0xe0: 4247 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n"); 4248 return; 4249 case 0x100: 4250 fprintf(logfile, "IN:\n0x00000100: syscall\n"); 4251 return; 4252 } 4253 #endif 4254 4255 fprintf(logfile, "IN: %s\n", lookup_symbol(pc)); 4256 target_disas(logfile, cs, pc, dcbase->tb->size); 4257 } 4258 4259 static const TranslatorOps hppa_tr_ops = { 4260 .init_disas_context = hppa_tr_init_disas_context, 4261 .tb_start = hppa_tr_tb_start, 4262 .insn_start = hppa_tr_insn_start, 4263 .translate_insn = hppa_tr_translate_insn, 4264 .tb_stop = hppa_tr_tb_stop, 4265 .disas_log = hppa_tr_disas_log, 4266 }; 4267 4268 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns, 4269 target_ulong pc, void *host_pc) 4270 { 4271 DisasContext ctx; 4272 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base); 4273 } 4274