/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H


/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}
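/*
 * The tri-state value produced by the decode helpers above follows the
 * convention documented at do_load_32 below: < 0 selects pre-modify,
 * > 0 post-modify, and 0 no base register update.  So ma_to_m maps
 * M:A = 0x -> 0 (no update), 10 -> +1 (post), 11 -> -1 (pre).
 */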
/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
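/*
 * Note that cpu_gr[0] is deliberately left NULL: HPPA %r0 is hardwired
 * to zero, so reads materialize a fresh zero temp and writes are
 * quietly discarded -- see load_gpr, dest_gpr and save_gpr below.
 */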
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
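/*
 * In other words, when the previous insn may nullify the current one,
 * results are computed into a temp and committed with a movcond that
 * keeps the old architectural value if the nullify condition holds.
 * E.g. with null_cond = (psw_n != 0), save_or_nullify emits roughly
 *   dest = (psw_n != 0 ? dest : t);
 * so a nullified insn leaves the register file untouched.
 */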
#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS
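/*
 * The addressing above reflects how PA-RISC names single-precision
 * registers: the fr registers are 64 bits wide and the 32-bit names
 * alias their halves, with bit 5 of the register number selecting
 * which half.  HI_OFS/LO_OFS turn that into a byte offset within the
 * 64-bit slot regardless of host endianness.
 */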
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
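/*
 * To summarize the machinery above: null_cond tracks, symbolically,
 * whether the *current* insn is nullified by its predecessor.  Simple
 * insns fold it into their writeback via save_or_nullify; complex ones
 * bracket their implementation with nullify_over/nullify_end, which
 * branch around the body entirely.  The condition only needs to be
 * flushed to the PSW[N] global (nullify_save) when leaving the TB or
 * raising an exception.
 */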
/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}
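/*
 * Throughout the translator, instruction addresses are tracked both as
 * compile-time constants (iaoq_f/iaoq_b in DisasContext) and, when the
 * value is only known at run time, as TCG variables.  The constant -1
 * is the sentinel for "not known at translation time, use the variable
 * instead", which is what copy_iaoq_entry dispatches on.
 */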
static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
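/*
 * Note the cf encoding used above and by the callers: bits 3:1 select
 * one of the eight arithmetic conditions, and bit 0 negates the sense.
 * For example cf = 5 is condition 2 ("<") negated, i.e. ">=".
 * cond_need_sv/cond_need_cb report which conditions consume the
 * signed-overflow and carry values, so callers can avoid computing
 * them when unused.
 */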
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
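/*
 * Two tricks above are worth spelling out.  The carry vector is
 *   cb = ((in1 | in2) & ~res) | (in1 & in2)
 * which is the per-bit majority of in1, in2 and ~res; since
 * res = in1 ^ in2 ^ carry-in at every bit, this evaluates to the carry
 * *out* of each bit, so masking with 0x8888..., 0x8080... or 0x8000...
 * selects the digit, byte or halfword carries.  The "hasless" test
 * flags any zero byte: e.g. for res = 0x12003456,
 * (res - 0x01010101) & ~res & 0x80808080 = 0x00800000, whose set bit
 * marks the zero byte.
 */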
/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}
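/*
 * These are the classic branch-free overflow idioms, leaving the
 * result in the sign bit of SV (hence the TCG_COND_LT test in do_cond
 * case 6).  For addition, overflow requires the operands to agree in
 * sign while the result differs:
 *   sv = (res ^ in1) & ~(in1 ^ in2)
 * For subtraction the operands must differ in sign instead:
 *   sv = (res ^ in1) & (in1 ^ in2)
 */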
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
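/*
 * Note how the PSW carry/borrow state is represented in both helpers
 * above: cpu_psw_cb holds in1 ^ in2 ^ dest, whose bit B is the carry
 * *into* bit B (i.e. the carry out of bit B-1), while the carry out of
 * the most significant bit is kept separately in cpu_psw_cb_msb.
 * Together these cover all of the architected per-nibble C/B bits.
 */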
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
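/*
 * The system-mode branch above relies on the space registers being
 * stored pre-shifted into the high half of their 64-bit slots (see
 * trans_mtsp below), so the global virtual address can be formed with
 * a plain OR of space and offset.  With PSW_W set the offset is first
 * masked down to 62 bits before the space bits are merged in.
 */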
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}
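/*
 * In the fldw/fldd forms above, a->scale selects scaled indexing, so
 * the index register shift is simply log2 of the operand size (2 for
 * words, 3 for doublewords).  Note also UNALIGN(ctx): in system mode
 * every access is MO_ALIGN, since unaligned accesses are expected to
 * trap on real hardware, while user mode defers to the per-TB
 * unalign setting.
 */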
static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}
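/*
 * The load_frw0/load_frd0 variants used by the computational FP ops
 * above return zero for register 0: fr0 doubles as the FP status
 * register, so it is not a general operand.  Likewise the
 * gen_helper_loaded_fr0 calls in do_floadw/do_floadd resynchronize the
 * emulated status after a load targets fr0.
 */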
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
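/*
 * The disp-sign tests above implement the architected behavior of the
 * ",n" completer on conditional branches: a taken forward branch
 * nullifies the following insn, and so does a not-taken backward
 * branch.  This way the delay slot of a backward (loop) branch
 * executes on every taken iteration but is squashed on loop exit.
 */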
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
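/*
 * The movcond in the default case works because the two values differ
 * only in their low 2 bits (the privilege field): dest is the target
 * address with the current privilege substituted, so dest > offset
 * exactly when the target's privilege bits are numerically smaller
 * than the current level (an attempt to gain privilege), in which
 * case the clamped dest is used.  The result is the numeric maximum
 * of the two privilege levels, i.e. a branch can never raise privilege.
 */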
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction from
       executing under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            gen_helper_read_interval_timer(tmp);
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }

    return nullify_end(ctx);
}
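
/*
 * A minimal standalone model (hypothetical names) of the
 * space-register layout used by trans_mfsp/trans_mtsp above: the
 * 32-bit space id lives in the upper half of a 64-bit slot, so
 * stores shift left by 32 and reads shift back down.
 *
 *     uint64_t sr_pack(uint32_t space)  { return (uint64_t)space << 32; }
 *     uint32_t sr_unpack(uint64_t slot) { return (uint32_t)(slot >> 32); }
 *
 *     // sr_unpack(sr_pack(0x1234)) == 0x1234
 */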

static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_reg reg;
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    reg = load_gpr(ctx, a->r);

    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(tcg_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(tcg_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(tcg_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, tcg_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(tcg_env);
#endif
        break;

    default:
        tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_reg dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode.  */
    tcg_gen_movi_reg(dest, 0);
#else
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, tcg_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(tcg_env);
    } else {
        gen_helper_rfi(tcg_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}

static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_halt(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_reset(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_getshadowregs(tcg_env);
    return nullify_end(ctx);
#endif
}

static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
    if (a->m) {
        TCGv_reg dest = dest_gpr(ctx, a->b);
        TCGv_reg src1 = load_gpr(ctx, a->b);
        TCGv_reg src2 = load_gpr(ctx, a->x);

        /* The only thing we need to do is the base register modification.  */
        tcg_gen_add_reg(dest, src1, src2);
        save_gpr(ctx, a->b, dest);
    }
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    if (a->imm) {
        level = tcg_constant_i32(a->ri);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, tcg_env, addr, level, want);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}

static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    if (a->local) {
        gen_helper_ptlbe(tcg_env);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

/*
 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
 * See
 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
 *     page 13-9 (195/206)
 */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr, atl, stl;
    TCGv_reg reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *      return gen_illegal(ctx);
     *
     * Note for future: these are 32-bit systems; no hppa64.
     */

    atl = tcg_temp_new_tl();
    stl = tcg_temp_new_tl();
    addr = tcg_temp_new_tl();

    tcg_gen_ld32u_i64(stl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_tl(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl vaddr;
    TCGv_reg ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new();
    gen_helper_lpa(paddr, tcg_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}

static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, tcg_constant_reg(0));

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}

static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_reg);
}

static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_reg);
}

static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            cond_free(&ctx->null_cond);
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_reg dest = dest_gpr(ctx, rt);
                tcg_gen_movi_reg(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            cond_free(&ctx->null_cond);
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */
            nullify_over(ctx);

            /* Advance the instruction queue.  */
            copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_reg);
}

static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_reg);
}

static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
    return nullify_end(ctx);
}

static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
    return nullify_end(ctx);
}

static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2, tmp;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = get_temp(ctx);
    tcg_gen_not_reg(tmp, tcg_r2);
    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx);
}

static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, true);
}

static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
{
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
            is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
    return nullify_end(ctx);
}

static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, true);
}
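
/*
 * A worked example of the DCOR correction above (hypothetical,
 * standalone).  In the usual PA-RISC BCD sequence one operand is
 * pre-biased by 0x6...6 before the binary ADD; DCOR then subtracts
 * 6 from every digit that did *not* produce a nybble carry, which
 * is what the 0x11111111 mask times 6 computes.
 *
 *     // BCD 28 + 35: pre-bias and add -> 0x28 + 0x35 + 0x66 = 0xC3
 *     // nybble carries: digit 0 carried (8+5+6 = 19), digit 1 did not
 *     // correction: 6 in every non-carry digit -> 0x60
 *     // 0xC3 - 0x60 = 0x63, the BCD encoding of 63  (28 + 35 = 63)
 */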

static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
    }

    return nullify_end(ctx);
}
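
/*
 * The PSW[V]-controlled add/subtract in trans_ds uses a branch-free
 * conditional-negate identity rather than a movcond.  A standalone
 * model (hypothetical names):
 *
 *     uint32_t cond_addsub(uint32_t a, uint32_t b, uint32_t v)
 *     {
 *         uint32_t m = (uint32_t)((int32_t)v >> 31);  // all-ones if V set
 *         // V set:   a + (b ^ ~0) + 1 == a - b
 *         // V clear: a + (b ^  0) + 0 == a + b
 *         return a + (b ^ m) + (m & 1);
 *     }
 */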

static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}

static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);

    return nullify_end(ctx);
}

static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                       a->disp, a->sp, a->m, a->size | MO_TE);
    }
}

static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
    }
}

static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    zero = tcg_constant_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
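
/*
 * LDCW is the PA-RISC "load and clear word" semaphore primitive: it
 * atomically returns the old word and leaves zero behind, which is
 * why it is translated to an atomic exchange with zero above.  A
 * standalone C11 model (hypothetical, not translator code):
 *
 *     #include <stdatomic.h>
 *
 *     // Returns nonzero iff the lock was acquired: the PA-RISC
 *     // convention uses 0 as the "locked" value.
 *     unsigned ldcw_acquire(_Atomic unsigned *lock)
 *     {
 *         return atomic_exchange(lock, 0);
 *     }
 */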

static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_reg ofs, val;
    TCGv_tl addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             ctx->mmu_idx == MMU_PHYS_IDX);
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_reg(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}

static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_reg(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_reg tcg_rt = load_gpr(ctx, a->r);
    TCGv_reg tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
    if (a->b == 0) {
        tcg_gen_movi_reg(tcg_rt, a->i);
    } else {
        tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}
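
/*
 * LDIL and LDO together synthesize a full 32-bit constant: LDIL
 * supplies the upper 21 bits, LDO adds the low part.  A standalone
 * model of the assembler's L%/R% field split (hypothetical names;
 * the 21/11-bit split is the conventional PA-RISC one):
 *
 *     uint32_t lsel(uint32_t x) { return x & ~0x7ffu; }  // upper 21 bits
 *     uint32_t rsel(uint32_t x) { return x &  0x7ffu; }  // lower 11 bits
 *
 *     // ldil puts lsel(x) in the register; ldo adds rsel(x):
 *     // lsel(x) + rsel(x) == x for any 32-bit x.
 */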

static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}

static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv, cb_msb;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new();
    sv = NULL;
    cb_msb = NULL;

    if (cond_need_cb(c)) {
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
    } else {
        tcg_gen_add_reg(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
    }

    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}

static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shli_reg(tmp, tcg_r, a->p);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
    }

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_reg(dest, a->i);

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
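
/*
 * PA-RISC numbers bits big-endian: bit 0 is the most significant.
 * BB (trans_bb_sar/trans_bb_imm above) therefore tests bit P by
 * shifting it left into the sign position and branching on the
 * sign.  Standalone model (hypothetical names, 32-bit):
 *
 *     // Nonzero iff big-endian bit p of x is set (p in 0..31).
 *     int test_be_bit(uint32_t x, unsigned p)
 *     {
 *         return (int32_t)(x << p) < 0;
 *     }
 *
 *     // test_be_bit(0x80000000, 0) == 1;  test_be_bit(1, 31) == 1
 */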

static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
{
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    if (a->r1 == 0) {
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
{
    unsigned sa = 31 - a->cpos;
    TCGv_reg dest, t2;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else if (TARGET_REGISTER_BITS == 32) {
        tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
    } else {
        TCGv_i64 t64 = tcg_temp_new_i64();
        tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
        tcg_gen_shri_i64(t64, t64, sa);
        tcg_gen_trunc_i64_reg(dest, t64);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
{
    unsigned len = 32 - a->clen;
    TCGv_reg dest, src, tmp;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (a->se) {
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
{
    unsigned len = 32 - a->clen;
    unsigned cpos = 31 - a->pos;
    TCGv_reg dest, src;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
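
/*
 * SHRPW above is a funnel shift: the two 32-bit sources are glued
 * into a 64-bit value and shifted right, as in the concat path.  A
 * standalone model (hypothetical names):
 *
 *     uint32_t shrpw(uint32_t r1, uint32_t r2, unsigned sa)
 *     {
 *         // r1 forms the high word, r2 the low word.
 *         uint64_t t = ((uint64_t)r1 << 32) | r2;
 *         return (uint32_t)(t >> sa);
 *     }
 *
 *     // shrpw(0xAAAAAAAA, 0x55555555, 4) == 0xA5555555
 */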

static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
{
    unsigned len = 32 - a->clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_reg src = load_gpr(ctx, a->t);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len = 32 - a->clen;
    TCGv_reg dest, val;

    if (a->c) {
        nullify_over(ctx);
    }
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
                        unsigned nz, unsigned clen, TCGv_reg val)
{
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    mask = tcg_temp_new();
    tcg_gen_movi_reg(mask, msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
}

static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
}
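
/*
 * The mask0/mask1 trick in trans_depwi_imm folds an immediate
 * deposit into at most an AND plus an OR.  A worked example
 * (hypothetical, standalone), depositing i = 0x5 at cpos = 8 with
 * len = 4:
 *
 *     // mask0 = deposit64(0, 8, 4, 5)  == 0x00000500  (field bits set)
 *     // mask1 = deposit64(-1, 8, 4, 5) == 0xFFFFF5FF  (other bits kept)
 *     uint32_t depwi(uint32_t old)
 *     {
 *         return (old & 0xFFFFF5FFu) | 0x00000500u;
 *     }
 */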

static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage alongside branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}

static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = cpu_env(ctx->cs);
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
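
/*
 * Gateway promotion above rewrites only the privilege nybble of the
 * branch target: access-rights type 4..7 maps onto privilege 0..3.
 * A worked example (hypothetical, standalone), with type == 4 and a
 * current privilege of 3:
 *
 *     // deposit32(dest, 0, 2, type - 4) replaces the low two bits.
 *     // dest = 0x1003, type - 4 = 0  ->  0x1000: promoted to priv 0.
 *     uint32_t promote(uint32_t dest, unsigned type)
 *     {
 *         return (dest & ~3u) | ((type - 4) & 3u);
 *     }
 */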

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
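
/*
 * BLR and BV scale their index register by 8 (shift left 3),
 * presumably because PA-RISC branch tables are built from 8-byte
 * slots: a branch plus its delay-slot insn.  Standalone model of
 * the target computation (hypothetical names):
 *
 *     uint32_t blr_target(uint32_t iaoq_f, uint32_t x)
 *     {
 *         return (x << 3) + iaoq_f + 8;   // PC + 8 + 8 * index
 *     }
 */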

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (TARGET_REGISTER_BITS == 64) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
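
/*
 * The single-precision FMPYADD encoding packs a register number into
 * 5 bits; fmpyadd_s_reg() maps it onto the register range used for
 * singles.  Worked examples of (r & 16) * 2 + 16 + (r & 15):
 *
 *     //  r = 0   -> 16   (fr16)
 *     //  r = 15  -> 31   (fr31)
 *     //  r = 16  -> 48   (the other word half of fr16, in QEMU's
 *     //  r = 31  -> 63    single-precision numbering)
 */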

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
                    PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
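
/*
 * In the system case of hppa_tr_init_disas_context, tb->cs_base
 * packs two independent values: the space portion of the virtual
 * address in the high 32 bits and the signed IAOQ_B - IAOQ_F delta
 * in the low 32 bits (0 meaning "unknown").  A standalone model of
 * the unpacking (hypothetical names):
 *
 *     void unpack_cs_base(uint64_t cs_base, uint64_t *iasq_f, int32_t *diff)
 *     {
 *         *iasq_f = cs_base & ~0xffffffffull;  // space bits only
 *         *diff = (int32_t)cs_base;            // truncating cast: low half
 *     }
 */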

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Forget any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}