/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre- or post-modify.  */
static int pos_to_m(int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT  DISAS_TARGET_2

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1_is_0 = true
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because...  */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb for page crossing, IO, or single-stepping.  */
    return !(((ctx->base.pc_first ^ dest) & TARGET_PAGE_MASK)
             || (tb_cflags(ctx->base.tb) & CF_LAST_IO)
             || ctx->base.singlestep_enabled);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
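/* (Descriptive note, derived from the code below: the sign bit of SV is
   set exactly when IN1 and IN2 have the same sign but RES differs, i.e.
   sv = (res ^ in1) & ~(in1 ^ in2).)  */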
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches.  */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path.  */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
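        /* (Descriptive note: OR-ing 3 into the two low bits of the offset
           pins the target's privilege field at level 3.)  */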
           */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.
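       Emitting it unconditionally is harmless even if this insn is
       nullified, since a barrier has no architecturally visible
       register effect.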
*/ 2101 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); 2102 2103 cond_free(&ctx->null_cond); 2104 return true; 2105 } 2106 2107 static bool trans_mfia(DisasContext *ctx, arg_mfia *a) 2108 { 2109 unsigned rt = a->t; 2110 TCGv_reg tmp = dest_gpr(ctx, rt); 2111 tcg_gen_movi_reg(tmp, ctx->iaoq_f); 2112 save_gpr(ctx, rt, tmp); 2113 2114 cond_free(&ctx->null_cond); 2115 return true; 2116 } 2117 2118 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a) 2119 { 2120 unsigned rt = a->t; 2121 unsigned rs = a->sp; 2122 TCGv_i64 t0 = tcg_temp_new_i64(); 2123 TCGv_reg t1 = tcg_temp_new(); 2124 2125 load_spr(ctx, t0, rs); 2126 tcg_gen_shri_i64(t0, t0, 32); 2127 tcg_gen_trunc_i64_reg(t1, t0); 2128 2129 save_gpr(ctx, rt, t1); 2130 tcg_temp_free(t1); 2131 tcg_temp_free_i64(t0); 2132 2133 cond_free(&ctx->null_cond); 2134 return true; 2135 } 2136 2137 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) 2138 { 2139 unsigned rt = a->t; 2140 unsigned ctl = a->r; 2141 TCGv_reg tmp; 2142 2143 switch (ctl) { 2144 case CR_SAR: 2145 #ifdef TARGET_HPPA64 2146 if (a->e == 0) { 2147 /* MFSAR without ,W masks low 5 bits. */ 2148 tmp = dest_gpr(ctx, rt); 2149 tcg_gen_andi_reg(tmp, cpu_sar, 31); 2150 save_gpr(ctx, rt, tmp); 2151 goto done; 2152 } 2153 #endif 2154 save_gpr(ctx, rt, cpu_sar); 2155 goto done; 2156 case CR_IT: /* Interval Timer */ 2157 /* FIXME: Respect PSW_S bit. */ 2158 nullify_over(ctx); 2159 tmp = dest_gpr(ctx, rt); 2160 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 2161 gen_io_start(); 2162 gen_helper_read_interval_timer(tmp); 2163 gen_io_end(); 2164 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2165 } else { 2166 gen_helper_read_interval_timer(tmp); 2167 } 2168 save_gpr(ctx, rt, tmp); 2169 return nullify_end(ctx); 2170 case 26: 2171 case 27: 2172 break; 2173 default: 2174 /* All other control registers are privileged. */ 2175 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2176 break; 2177 } 2178 2179 tmp = get_temp(ctx); 2180 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2181 save_gpr(ctx, rt, tmp); 2182 2183 done: 2184 cond_free(&ctx->null_cond); 2185 return true; 2186 } 2187 2188 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) 2189 { 2190 unsigned rr = a->r; 2191 unsigned rs = a->sp; 2192 TCGv_i64 t64; 2193 2194 if (rs >= 5) { 2195 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2196 } 2197 nullify_over(ctx); 2198 2199 t64 = tcg_temp_new_i64(); 2200 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr)); 2201 tcg_gen_shli_i64(t64, t64, 32); 2202 2203 if (rs >= 4) { 2204 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs])); 2205 ctx->tb_flags &= ~TB_FLAG_SR_SAME; 2206 } else { 2207 tcg_gen_mov_i64(cpu_sr[rs], t64); 2208 } 2209 tcg_temp_free_i64(t64); 2210 2211 return nullify_end(ctx); 2212 } 2213 2214 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) 2215 { 2216 unsigned ctl = a->t; 2217 TCGv_reg reg = load_gpr(ctx, a->r); 2218 TCGv_reg tmp; 2219 2220 if (ctl == CR_SAR) { 2221 tmp = tcg_temp_new(); 2222 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1); 2223 save_or_nullify(ctx, cpu_sar, tmp); 2224 tcg_temp_free(tmp); 2225 2226 cond_free(&ctx->null_cond); 2227 return true; 2228 } 2229 2230 /* All other control registers are privileged or read-only. 
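       Writes to them from a less-privileged level are rejected with a
       privileged-register trap before any machine state is modified.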
*/ 2231 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2232 2233 #ifndef CONFIG_USER_ONLY 2234 nullify_over(ctx); 2235 switch (ctl) { 2236 case CR_IT: 2237 gen_helper_write_interval_timer(cpu_env, reg); 2238 break; 2239 case CR_EIRR: 2240 gen_helper_write_eirr(cpu_env, reg); 2241 break; 2242 case CR_EIEM: 2243 gen_helper_write_eiem(cpu_env, reg); 2244 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2245 break; 2246 2247 case CR_IIASQ: 2248 case CR_IIAOQ: 2249 /* FIXME: Respect PSW_Q bit */ 2250 /* The write advances the queue and stores to the back element. */ 2251 tmp = get_temp(ctx); 2252 tcg_gen_ld_reg(tmp, cpu_env, 2253 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2254 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2255 tcg_gen_st_reg(reg, cpu_env, 2256 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2257 break; 2258 2259 case CR_PID1: 2260 case CR_PID2: 2261 case CR_PID3: 2262 case CR_PID4: 2263 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2264 #ifndef CONFIG_USER_ONLY 2265 gen_helper_change_prot_id(cpu_env); 2266 #endif 2267 break; 2268 2269 default: 2270 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl])); 2271 break; 2272 } 2273 return nullify_end(ctx); 2274 #endif 2275 } 2276 2277 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) 2278 { 2279 TCGv_reg tmp = tcg_temp_new(); 2280 2281 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r)); 2282 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); 2283 save_or_nullify(ctx, cpu_sar, tmp); 2284 tcg_temp_free(tmp); 2285 2286 cond_free(&ctx->null_cond); 2287 return true; 2288 } 2289 2290 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) 2291 { 2292 TCGv_reg dest = dest_gpr(ctx, a->t); 2293 2294 #ifdef CONFIG_USER_ONLY 2295 /* We don't implement space registers in user mode. */ 2296 tcg_gen_movi_reg(dest, 0); 2297 #else 2298 TCGv_i64 t0 = tcg_temp_new_i64(); 2299 2300 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b))); 2301 tcg_gen_shri_i64(t0, t0, 32); 2302 tcg_gen_trunc_i64_reg(dest, t0); 2303 2304 tcg_temp_free_i64(t0); 2305 #endif 2306 save_gpr(ctx, a->t, dest); 2307 2308 cond_free(&ctx->null_cond); 2309 return true; 2310 } 2311 2312 static bool trans_rsm(DisasContext *ctx, arg_rsm *a) 2313 { 2314 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2315 #ifndef CONFIG_USER_ONLY 2316 TCGv_reg tmp; 2317 2318 nullify_over(ctx); 2319 2320 tmp = get_temp(ctx); 2321 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw)); 2322 tcg_gen_andi_reg(tmp, tmp, ~a->i); 2323 gen_helper_swap_system_mask(tmp, cpu_env, tmp); 2324 save_gpr(ctx, a->t, tmp); 2325 2326 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */ 2327 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2328 return nullify_end(ctx); 2329 #endif 2330 } 2331 2332 static bool trans_ssm(DisasContext *ctx, arg_ssm *a) 2333 { 2334 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2335 #ifndef CONFIG_USER_ONLY 2336 TCGv_reg tmp; 2337 2338 nullify_over(ctx); 2339 2340 tmp = get_temp(ctx); 2341 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw)); 2342 tcg_gen_ori_reg(tmp, tmp, a->i); 2343 gen_helper_swap_system_mask(tmp, cpu_env, tmp); 2344 save_gpr(ctx, a->t, tmp); 2345 2346 /* Exit the TB to recognize new interrupts, e.g. PSW_I. 
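       DISAS_IAQ_N_STALE_EXIT ends the TB with a full exit_tb rather
       than a goto_ptr lookup, so the main loop re-checks for pending
       interrupts before any further code is executed.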
*/ 2347 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2348 return nullify_end(ctx); 2349 #endif 2350 } 2351 2352 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a) 2353 { 2354 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2355 #ifndef CONFIG_USER_ONLY 2356 TCGv_reg tmp, reg; 2357 nullify_over(ctx); 2358 2359 reg = load_gpr(ctx, a->r); 2360 tmp = get_temp(ctx); 2361 gen_helper_swap_system_mask(tmp, cpu_env, reg); 2362 2363 /* Exit the TB to recognize new interrupts. */ 2364 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2365 return nullify_end(ctx); 2366 #endif 2367 } 2368 2369 static bool do_rfi(DisasContext *ctx, bool rfi_r) 2370 { 2371 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2372 #ifndef CONFIG_USER_ONLY 2373 nullify_over(ctx); 2374 2375 if (rfi_r) { 2376 gen_helper_rfi_r(cpu_env); 2377 } else { 2378 gen_helper_rfi(cpu_env); 2379 } 2380 /* Exit the TB to recognize new interrupts. */ 2381 if (ctx->base.singlestep_enabled) { 2382 gen_excp_1(EXCP_DEBUG); 2383 } else { 2384 tcg_gen_exit_tb(NULL, 0); 2385 } 2386 ctx->base.is_jmp = DISAS_NORETURN; 2387 2388 return nullify_end(ctx); 2389 #endif 2390 } 2391 2392 static bool trans_rfi(DisasContext *ctx, arg_rfi *a) 2393 { 2394 return do_rfi(ctx, false); 2395 } 2396 2397 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a) 2398 { 2399 return do_rfi(ctx, true); 2400 } 2401 2402 static bool trans_halt(DisasContext *ctx, arg_halt *a) 2403 { 2404 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2405 #ifndef CONFIG_USER_ONLY 2406 nullify_over(ctx); 2407 gen_helper_halt(cpu_env); 2408 ctx->base.is_jmp = DISAS_NORETURN; 2409 return nullify_end(ctx); 2410 #endif 2411 } 2412 2413 static bool trans_reset(DisasContext *ctx, arg_reset *a) 2414 { 2415 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2416 #ifndef CONFIG_USER_ONLY 2417 nullify_over(ctx); 2418 gen_helper_reset(cpu_env); 2419 ctx->base.is_jmp = DISAS_NORETURN; 2420 return nullify_end(ctx); 2421 #endif 2422 } 2423 2424 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) 2425 { 2426 if (a->m) { 2427 TCGv_reg dest = dest_gpr(ctx, a->b); 2428 TCGv_reg src1 = load_gpr(ctx, a->b); 2429 TCGv_reg src2 = load_gpr(ctx, a->x); 2430 2431 /* The only thing we need to do is the base register modification. */ 2432 tcg_gen_add_reg(dest, src1, src2); 2433 save_gpr(ctx, a->b, dest); 2434 } 2435 cond_free(&ctx->null_cond); 2436 return true; 2437 } 2438 2439 static bool trans_probe(DisasContext *ctx, arg_probe *a) 2440 { 2441 TCGv_reg dest, ofs; 2442 TCGv_i32 level, want; 2443 TCGv_tl addr; 2444 2445 nullify_over(ctx); 2446 2447 dest = dest_gpr(ctx, a->t); 2448 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2449 2450 if (a->imm) { 2451 level = tcg_const_i32(a->ri); 2452 } else { 2453 level = tcg_temp_new_i32(); 2454 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri)); 2455 tcg_gen_andi_i32(level, level, 3); 2456 } 2457 want = tcg_const_i32(a->write ? 
PAGE_WRITE : PAGE_READ); 2458 2459 gen_helper_probe(dest, cpu_env, addr, level, want); 2460 2461 tcg_temp_free_i32(want); 2462 tcg_temp_free_i32(level); 2463 2464 save_gpr(ctx, a->t, dest); 2465 return nullify_end(ctx); 2466 } 2467 2468 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a) 2469 { 2470 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2471 #ifndef CONFIG_USER_ONLY 2472 TCGv_tl addr; 2473 TCGv_reg ofs, reg; 2474 2475 nullify_over(ctx); 2476 2477 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2478 reg = load_gpr(ctx, a->r); 2479 if (a->addr) { 2480 gen_helper_itlba(cpu_env, addr, reg); 2481 } else { 2482 gen_helper_itlbp(cpu_env, addr, reg); 2483 } 2484 2485 /* Exit TB for TLB change if mmu is enabled. */ 2486 if (ctx->tb_flags & PSW_C) { 2487 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2488 } 2489 return nullify_end(ctx); 2490 #endif 2491 } 2492 2493 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a) 2494 { 2495 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2496 #ifndef CONFIG_USER_ONLY 2497 TCGv_tl addr; 2498 TCGv_reg ofs; 2499 2500 nullify_over(ctx); 2501 2502 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2503 if (a->m) { 2504 save_gpr(ctx, a->b, ofs); 2505 } 2506 if (a->local) { 2507 gen_helper_ptlbe(cpu_env); 2508 } else { 2509 gen_helper_ptlb(cpu_env, addr); 2510 } 2511 2512 /* Exit TB for TLB change if mmu is enabled. */ 2513 if (ctx->tb_flags & PSW_C) { 2514 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2515 } 2516 return nullify_end(ctx); 2517 #endif 2518 } 2519 2520 static bool trans_lpa(DisasContext *ctx, arg_ldst *a) 2521 { 2522 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2523 #ifndef CONFIG_USER_ONLY 2524 TCGv_tl vaddr; 2525 TCGv_reg ofs, paddr; 2526 2527 nullify_over(ctx); 2528 2529 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2530 2531 paddr = tcg_temp_new(); 2532 gen_helper_lpa(paddr, cpu_env, vaddr); 2533 2534 /* Note that physical address result overrides base modification. */ 2535 if (a->m) { 2536 save_gpr(ctx, a->b, ofs); 2537 } 2538 save_gpr(ctx, a->t, paddr); 2539 tcg_temp_free(paddr); 2540 2541 return nullify_end(ctx); 2542 #endif 2543 } 2544 2545 static bool trans_lci(DisasContext *ctx, arg_lci *a) 2546 { 2547 TCGv_reg ci; 2548 2549 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2550 2551 /* The Coherence Index is an implementation-defined function of the 2552 physical address. Two addresses with the same CI have a coherent 2553 view of the cache. Our implementation is to return 0 for all, 2554 since the entire address space is coherent. 
*/ 2555 ci = tcg_const_reg(0); 2556 save_gpr(ctx, a->t, ci); 2557 tcg_temp_free(ci); 2558 2559 cond_free(&ctx->null_cond); 2560 return true; 2561 } 2562 2563 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a) 2564 { 2565 return do_add_reg(ctx, a, false, false, false, false); 2566 } 2567 2568 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a) 2569 { 2570 return do_add_reg(ctx, a, true, false, false, false); 2571 } 2572 2573 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a) 2574 { 2575 return do_add_reg(ctx, a, false, true, false, false); 2576 } 2577 2578 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a) 2579 { 2580 return do_add_reg(ctx, a, false, false, false, true); 2581 } 2582 2583 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a) 2584 { 2585 return do_add_reg(ctx, a, false, true, false, true); 2586 } 2587 2588 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a) 2589 { 2590 return do_sub_reg(ctx, a, false, false, false); 2591 } 2592 2593 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a) 2594 { 2595 return do_sub_reg(ctx, a, true, false, false); 2596 } 2597 2598 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a) 2599 { 2600 return do_sub_reg(ctx, a, false, false, true); 2601 } 2602 2603 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a) 2604 { 2605 return do_sub_reg(ctx, a, true, false, true); 2606 } 2607 2608 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a) 2609 { 2610 return do_sub_reg(ctx, a, false, true, false); 2611 } 2612 2613 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a) 2614 { 2615 return do_sub_reg(ctx, a, true, true, false); 2616 } 2617 2618 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a) 2619 { 2620 return do_log_reg(ctx, a, tcg_gen_andc_reg); 2621 } 2622 2623 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a) 2624 { 2625 return do_log_reg(ctx, a, tcg_gen_and_reg); 2626 } 2627 2628 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a) 2629 { 2630 if (a->cf == 0) { 2631 unsigned r2 = a->r2; 2632 unsigned r1 = a->r1; 2633 unsigned rt = a->t; 2634 2635 if (rt == 0) { /* NOP */ 2636 cond_free(&ctx->null_cond); 2637 return true; 2638 } 2639 if (r2 == 0) { /* COPY */ 2640 if (r1 == 0) { 2641 TCGv_reg dest = dest_gpr(ctx, rt); 2642 tcg_gen_movi_reg(dest, 0); 2643 save_gpr(ctx, rt, dest); 2644 } else { 2645 save_gpr(ctx, rt, cpu_gr[r1]); 2646 } 2647 cond_free(&ctx->null_cond); 2648 return true; 2649 } 2650 #ifndef CONFIG_USER_ONLY 2651 /* These are QEMU extensions and are nops in the real architecture: 2652 * 2653 * or %r10,%r10,%r10 -- idle loop; wait for interrupt 2654 * or %r31,%r31,%r31 -- death loop; offline cpu 2655 * currently implemented as idle. 2656 */ 2657 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */ 2658 TCGv_i32 tmp; 2659 2660 /* No need to check for supervisor, as userland can only pause 2661 until the next timer interrupt. */ 2662 nullify_over(ctx); 2663 2664 /* Advance the instruction queue. */ 2665 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 2666 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); 2667 nullify_set(ctx, 0); 2668 2669 /* Tell the qemu main loop to halt until this cpu has work. 
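               The store below reaches CPUState.halted by the usual
               QEMU idiom of backing up from env by
               offsetof(HPPACPU, env) to the containing CPU object;
               raising EXCP_HALTED then leaves the execution loop
               until there is work for this cpu.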
*/ 2670 tmp = tcg_const_i32(1); 2671 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) + 2672 offsetof(CPUState, halted)); 2673 tcg_temp_free_i32(tmp); 2674 gen_excp_1(EXCP_HALTED); 2675 ctx->base.is_jmp = DISAS_NORETURN; 2676 2677 return nullify_end(ctx); 2678 } 2679 #endif 2680 } 2681 return do_log_reg(ctx, a, tcg_gen_or_reg); 2682 } 2683 2684 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a) 2685 { 2686 return do_log_reg(ctx, a, tcg_gen_xor_reg); 2687 } 2688 2689 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a) 2690 { 2691 TCGv_reg tcg_r1, tcg_r2; 2692 2693 if (a->cf) { 2694 nullify_over(ctx); 2695 } 2696 tcg_r1 = load_gpr(ctx, a->r1); 2697 tcg_r2 = load_gpr(ctx, a->r2); 2698 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf); 2699 return nullify_end(ctx); 2700 } 2701 2702 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a) 2703 { 2704 TCGv_reg tcg_r1, tcg_r2; 2705 2706 if (a->cf) { 2707 nullify_over(ctx); 2708 } 2709 tcg_r1 = load_gpr(ctx, a->r1); 2710 tcg_r2 = load_gpr(ctx, a->r2); 2711 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg); 2712 return nullify_end(ctx); 2713 } 2714 2715 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc) 2716 { 2717 TCGv_reg tcg_r1, tcg_r2, tmp; 2718 2719 if (a->cf) { 2720 nullify_over(ctx); 2721 } 2722 tcg_r1 = load_gpr(ctx, a->r1); 2723 tcg_r2 = load_gpr(ctx, a->r2); 2724 tmp = get_temp(ctx); 2725 tcg_gen_not_reg(tmp, tcg_r2); 2726 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg); 2727 return nullify_end(ctx); 2728 } 2729 2730 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a) 2731 { 2732 return do_uaddcm(ctx, a, false); 2733 } 2734 2735 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a) 2736 { 2737 return do_uaddcm(ctx, a, true); 2738 } 2739 2740 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i) 2741 { 2742 TCGv_reg tmp; 2743 2744 nullify_over(ctx); 2745 2746 tmp = get_temp(ctx); 2747 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3); 2748 if (!is_i) { 2749 tcg_gen_not_reg(tmp, tmp); 2750 } 2751 tcg_gen_andi_reg(tmp, tmp, 0x11111111); 2752 tcg_gen_muli_reg(tmp, tmp, 6); 2753 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false, 2754 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg); 2755 return nullify_end(ctx); 2756 } 2757 2758 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a) 2759 { 2760 return do_dcor(ctx, a, false); 2761 } 2762 2763 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a) 2764 { 2765 return do_dcor(ctx, a, true); 2766 } 2767 2768 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) 2769 { 2770 TCGv_reg dest, add1, add2, addc, zero, in1, in2; 2771 2772 nullify_over(ctx); 2773 2774 in1 = load_gpr(ctx, a->r1); 2775 in2 = load_gpr(ctx, a->r2); 2776 2777 add1 = tcg_temp_new(); 2778 add2 = tcg_temp_new(); 2779 addc = tcg_temp_new(); 2780 dest = tcg_temp_new(); 2781 zero = tcg_const_reg(0); 2782 2783 /* Form R1 << 1 | PSW[CB]{8}. */ 2784 tcg_gen_add_reg(add1, in1, in1); 2785 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb); 2786 2787 /* Add or subtract R2, depending on PSW[V]. Proper computation of 2788 carry{8} requires that we subtract via + ~R2 + 1, as described in 2789 the manual. By extracting and masking V, we can produce the 2790 proper inputs to the addition without movcond. */ 2791 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1); 2792 tcg_gen_xor_reg(add2, in2, addc); 2793 tcg_gen_andi_reg(addc, addc, 1); 2794 /* ??? This is only correct for 32-bit. 
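       At this point add1 = R1 << 1 | CB{8}; when the V bit is set
       (cpu_psw_v negative) add2 = ~R2 and addc = 1, otherwise
       add2 = R2 and addc = 0.  The two add2 operations below thus
       compute add1 + add2 + addc, leaving the carry-out in
       cpu_psw_cb_msb.  The explicit _i32 ops assume TCGv_reg is
       32 bits wide; a 64-bit register target would need the i64
       variants here.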
*/ 2795 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero); 2796 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero); 2797 2798 tcg_temp_free(addc); 2799 tcg_temp_free(zero); 2800 2801 /* Write back the result register. */ 2802 save_gpr(ctx, a->t, dest); 2803 2804 /* Write back PSW[CB]. */ 2805 tcg_gen_xor_reg(cpu_psw_cb, add1, add2); 2806 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest); 2807 2808 /* Write back PSW[V] for the division step. */ 2809 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb); 2810 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2); 2811 2812 /* Install the new nullification. */ 2813 if (a->cf) { 2814 TCGv_reg sv = NULL; 2815 if (cond_need_sv(a->cf >> 1)) { 2816 /* ??? The lshift is supposed to contribute to overflow. */ 2817 sv = do_add_sv(ctx, dest, add1, add2); 2818 } 2819 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv); 2820 } 2821 2822 tcg_temp_free(add1); 2823 tcg_temp_free(add2); 2824 tcg_temp_free(dest); 2825 2826 return nullify_end(ctx); 2827 } 2828 2829 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a) 2830 { 2831 return do_add_imm(ctx, a, false, false); 2832 } 2833 2834 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a) 2835 { 2836 return do_add_imm(ctx, a, true, false); 2837 } 2838 2839 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a) 2840 { 2841 return do_add_imm(ctx, a, false, true); 2842 } 2843 2844 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a) 2845 { 2846 return do_add_imm(ctx, a, true, true); 2847 } 2848 2849 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a) 2850 { 2851 return do_sub_imm(ctx, a, false); 2852 } 2853 2854 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a) 2855 { 2856 return do_sub_imm(ctx, a, true); 2857 } 2858 2859 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a) 2860 { 2861 TCGv_reg tcg_im, tcg_r2; 2862 2863 if (a->cf) { 2864 nullify_over(ctx); 2865 } 2866 2867 tcg_im = load_const(ctx, a->i); 2868 tcg_r2 = load_gpr(ctx, a->r); 2869 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf); 2870 2871 return nullify_end(ctx); 2872 } 2873 2874 static bool trans_ld(DisasContext *ctx, arg_ldst *a) 2875 { 2876 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0, 2877 a->disp, a->sp, a->m, a->size | MO_TE); 2878 } 2879 2880 static bool trans_st(DisasContext *ctx, arg_ldst *a) 2881 { 2882 assert(a->x == 0 && a->scale == 0); 2883 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE); 2884 } 2885 2886 static bool trans_ldc(DisasContext *ctx, arg_ldst *a) 2887 { 2888 TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size; 2889 TCGv_reg zero, dest, ofs; 2890 TCGv_tl addr; 2891 2892 nullify_over(ctx); 2893 2894 if (a->m) { 2895 /* Base register modification. Make sure if RT == RB, 2896 we see the result of the load. */ 2897 dest = get_temp(ctx); 2898 } else { 2899 dest = dest_gpr(ctx, a->t); 2900 } 2901 2902 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 
a->size : 0, 2903 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX); 2904 zero = tcg_const_reg(0); 2905 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop); 2906 if (a->m) { 2907 save_gpr(ctx, a->b, ofs); 2908 } 2909 save_gpr(ctx, a->t, dest); 2910 2911 return nullify_end(ctx); 2912 } 2913 2914 static bool trans_stby(DisasContext *ctx, arg_stby *a) 2915 { 2916 TCGv_reg ofs, val; 2917 TCGv_tl addr; 2918 2919 nullify_over(ctx); 2920 2921 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 2922 ctx->mmu_idx == MMU_PHYS_IDX); 2923 val = load_gpr(ctx, a->r); 2924 if (a->a) { 2925 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 2926 gen_helper_stby_e_parallel(cpu_env, addr, val); 2927 } else { 2928 gen_helper_stby_e(cpu_env, addr, val); 2929 } 2930 } else { 2931 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 2932 gen_helper_stby_b_parallel(cpu_env, addr, val); 2933 } else { 2934 gen_helper_stby_b(cpu_env, addr, val); 2935 } 2936 } 2937 if (a->m) { 2938 tcg_gen_andi_reg(ofs, ofs, ~3); 2939 save_gpr(ctx, a->b, ofs); 2940 } 2941 2942 return nullify_end(ctx); 2943 } 2944 2945 static bool trans_lda(DisasContext *ctx, arg_ldst *a) 2946 { 2947 int hold_mmu_idx = ctx->mmu_idx; 2948 2949 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2950 ctx->mmu_idx = MMU_PHYS_IDX; 2951 trans_ld(ctx, a); 2952 ctx->mmu_idx = hold_mmu_idx; 2953 return true; 2954 } 2955 2956 static bool trans_sta(DisasContext *ctx, arg_ldst *a) 2957 { 2958 int hold_mmu_idx = ctx->mmu_idx; 2959 2960 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2961 ctx->mmu_idx = MMU_PHYS_IDX; 2962 trans_st(ctx, a); 2963 ctx->mmu_idx = hold_mmu_idx; 2964 return true; 2965 } 2966 2967 static bool trans_ldil(DisasContext *ctx, arg_ldil *a) 2968 { 2969 TCGv_reg tcg_rt = dest_gpr(ctx, a->t); 2970 2971 tcg_gen_movi_reg(tcg_rt, a->i); 2972 save_gpr(ctx, a->t, tcg_rt); 2973 cond_free(&ctx->null_cond); 2974 return true; 2975 } 2976 2977 static bool trans_addil(DisasContext *ctx, arg_addil *a) 2978 { 2979 TCGv_reg tcg_rt = load_gpr(ctx, a->r); 2980 TCGv_reg tcg_r1 = dest_gpr(ctx, 1); 2981 2982 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i); 2983 save_gpr(ctx, 1, tcg_r1); 2984 cond_free(&ctx->null_cond); 2985 return true; 2986 } 2987 2988 static bool trans_ldo(DisasContext *ctx, arg_ldo *a) 2989 { 2990 TCGv_reg tcg_rt = dest_gpr(ctx, a->t); 2991 2992 /* Special case rb == 0, for the LDI pseudo-op. 2993 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. 
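       An addi with a zero immediate folds to a plain register move,
       so no separate special case for COPY is required.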
*/ 2994 if (a->b == 0) { 2995 tcg_gen_movi_reg(tcg_rt, a->i); 2996 } else { 2997 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i); 2998 } 2999 save_gpr(ctx, a->t, tcg_rt); 3000 cond_free(&ctx->null_cond); 3001 return true; 3002 } 3003 3004 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1, 3005 unsigned c, unsigned f, unsigned n, int disp) 3006 { 3007 TCGv_reg dest, in2, sv; 3008 DisasCond cond; 3009 3010 in2 = load_gpr(ctx, r); 3011 dest = get_temp(ctx); 3012 3013 tcg_gen_sub_reg(dest, in1, in2); 3014 3015 sv = NULL; 3016 if (cond_need_sv(c)) { 3017 sv = do_sub_sv(ctx, dest, in1, in2); 3018 } 3019 3020 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv); 3021 return do_cbranch(ctx, disp, n, &cond); 3022 } 3023 3024 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a) 3025 { 3026 nullify_over(ctx); 3027 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); 3028 } 3029 3030 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a) 3031 { 3032 nullify_over(ctx); 3033 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp); 3034 } 3035 3036 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1, 3037 unsigned c, unsigned f, unsigned n, int disp) 3038 { 3039 TCGv_reg dest, in2, sv, cb_msb; 3040 DisasCond cond; 3041 3042 in2 = load_gpr(ctx, r); 3043 dest = tcg_temp_new(); 3044 sv = NULL; 3045 cb_msb = NULL; 3046 3047 if (cond_need_cb(c)) { 3048 cb_msb = get_temp(ctx); 3049 tcg_gen_movi_reg(cb_msb, 0); 3050 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb); 3051 } else { 3052 tcg_gen_add_reg(dest, in1, in2); 3053 } 3054 if (cond_need_sv(c)) { 3055 sv = do_add_sv(ctx, dest, in1, in2); 3056 } 3057 3058 cond = do_cond(c * 2 + f, dest, cb_msb, sv); 3059 save_gpr(ctx, r, dest); 3060 tcg_temp_free(dest); 3061 return do_cbranch(ctx, disp, n, &cond); 3062 } 3063 3064 static bool trans_addb(DisasContext *ctx, arg_addb *a) 3065 { 3066 nullify_over(ctx); 3067 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); 3068 } 3069 3070 static bool trans_addbi(DisasContext *ctx, arg_addbi *a) 3071 { 3072 nullify_over(ctx); 3073 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp); 3074 } 3075 3076 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) 3077 { 3078 TCGv_reg tmp, tcg_r; 3079 DisasCond cond; 3080 3081 nullify_over(ctx); 3082 3083 tmp = tcg_temp_new(); 3084 tcg_r = load_gpr(ctx, a->r); 3085 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar); 3086 3087 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); 3088 tcg_temp_free(tmp); 3089 return do_cbranch(ctx, a->disp, a->n, &cond); 3090 } 3091 3092 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) 3093 { 3094 TCGv_reg tmp, tcg_r; 3095 DisasCond cond; 3096 3097 nullify_over(ctx); 3098 3099 tmp = tcg_temp_new(); 3100 tcg_r = load_gpr(ctx, a->r); 3101 tcg_gen_shli_reg(tmp, tcg_r, a->p); 3102 3103 cond = cond_make_0(a->c ? 
TCG_COND_GE : TCG_COND_LT, tmp); 3104 tcg_temp_free(tmp); 3105 return do_cbranch(ctx, a->disp, a->n, &cond); 3106 } 3107 3108 static bool trans_movb(DisasContext *ctx, arg_movb *a) 3109 { 3110 TCGv_reg dest; 3111 DisasCond cond; 3112 3113 nullify_over(ctx); 3114 3115 dest = dest_gpr(ctx, a->r2); 3116 if (a->r1 == 0) { 3117 tcg_gen_movi_reg(dest, 0); 3118 } else { 3119 tcg_gen_mov_reg(dest, cpu_gr[a->r1]); 3120 } 3121 3122 cond = do_sed_cond(a->c, dest); 3123 return do_cbranch(ctx, a->disp, a->n, &cond); 3124 } 3125 3126 static bool trans_movbi(DisasContext *ctx, arg_movbi *a) 3127 { 3128 TCGv_reg dest; 3129 DisasCond cond; 3130 3131 nullify_over(ctx); 3132 3133 dest = dest_gpr(ctx, a->r); 3134 tcg_gen_movi_reg(dest, a->i); 3135 3136 cond = do_sed_cond(a->c, dest); 3137 return do_cbranch(ctx, a->disp, a->n, &cond); 3138 } 3139 3140 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a) 3141 { 3142 TCGv_reg dest; 3143 3144 if (a->c) { 3145 nullify_over(ctx); 3146 } 3147 3148 dest = dest_gpr(ctx, a->t); 3149 if (a->r1 == 0) { 3150 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2)); 3151 tcg_gen_shr_reg(dest, dest, cpu_sar); 3152 } else if (a->r1 == a->r2) { 3153 TCGv_i32 t32 = tcg_temp_new_i32(); 3154 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2)); 3155 tcg_gen_rotr_i32(t32, t32, cpu_sar); 3156 tcg_gen_extu_i32_reg(dest, t32); 3157 tcg_temp_free_i32(t32); 3158 } else { 3159 TCGv_i64 t = tcg_temp_new_i64(); 3160 TCGv_i64 s = tcg_temp_new_i64(); 3161 3162 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1)); 3163 tcg_gen_extu_reg_i64(s, cpu_sar); 3164 tcg_gen_shr_i64(t, t, s); 3165 tcg_gen_trunc_i64_reg(dest, t); 3166 3167 tcg_temp_free_i64(t); 3168 tcg_temp_free_i64(s); 3169 } 3170 save_gpr(ctx, a->t, dest); 3171 3172 /* Install the new nullification. */ 3173 cond_free(&ctx->null_cond); 3174 if (a->c) { 3175 ctx->null_cond = do_sed_cond(a->c, dest); 3176 } 3177 return nullify_end(ctx); 3178 } 3179 3180 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a) 3181 { 3182 unsigned sa = 31 - a->cpos; 3183 TCGv_reg dest, t2; 3184 3185 if (a->c) { 3186 nullify_over(ctx); 3187 } 3188 3189 dest = dest_gpr(ctx, a->t); 3190 t2 = load_gpr(ctx, a->r2); 3191 if (a->r1 == a->r2) { 3192 TCGv_i32 t32 = tcg_temp_new_i32(); 3193 tcg_gen_trunc_reg_i32(t32, t2); 3194 tcg_gen_rotri_i32(t32, t32, sa); 3195 tcg_gen_extu_i32_reg(dest, t32); 3196 tcg_temp_free_i32(t32); 3197 } else if (a->r1 == 0) { 3198 tcg_gen_extract_reg(dest, t2, sa, 32 - sa); 3199 } else { 3200 TCGv_reg t0 = tcg_temp_new(); 3201 tcg_gen_extract_reg(t0, t2, sa, 32 - sa); 3202 tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa); 3203 tcg_temp_free(t0); 3204 } 3205 save_gpr(ctx, a->t, dest); 3206 3207 /* Install the new nullification. */ 3208 cond_free(&ctx->null_cond); 3209 if (a->c) { 3210 ctx->null_cond = do_sed_cond(a->c, dest); 3211 } 3212 return nullify_end(ctx); 3213 } 3214 3215 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a) 3216 { 3217 unsigned len = 32 - a->clen; 3218 TCGv_reg dest, src, tmp; 3219 3220 if (a->c) { 3221 nullify_over(ctx); 3222 } 3223 3224 dest = dest_gpr(ctx, a->t); 3225 src = load_gpr(ctx, a->r); 3226 tmp = tcg_temp_new(); 3227 3228 /* Recall that SAR is using big-endian bit numbering. 
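       XORing SAR with TARGET_REGISTER_BITS - 1 converts that
       big-endian bit position into the right-shift count used below
       (31 - SAR in the 32-bit case), since the constant is all ones
       in the shift field.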
*/ 3229 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1); 3230 if (a->se) { 3231 tcg_gen_sar_reg(dest, src, tmp); 3232 tcg_gen_sextract_reg(dest, dest, 0, len); 3233 } else { 3234 tcg_gen_shr_reg(dest, src, tmp); 3235 tcg_gen_extract_reg(dest, dest, 0, len); 3236 } 3237 tcg_temp_free(tmp); 3238 save_gpr(ctx, a->t, dest); 3239 3240 /* Install the new nullification. */ 3241 cond_free(&ctx->null_cond); 3242 if (a->c) { 3243 ctx->null_cond = do_sed_cond(a->c, dest); 3244 } 3245 return nullify_end(ctx); 3246 } 3247 3248 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a) 3249 { 3250 unsigned len = 32 - a->clen; 3251 unsigned cpos = 31 - a->pos; 3252 TCGv_reg dest, src; 3253 3254 if (a->c) { 3255 nullify_over(ctx); 3256 } 3257 3258 dest = dest_gpr(ctx, a->t); 3259 src = load_gpr(ctx, a->r); 3260 if (a->se) { 3261 tcg_gen_sextract_reg(dest, src, cpos, len); 3262 } else { 3263 tcg_gen_extract_reg(dest, src, cpos, len); 3264 } 3265 save_gpr(ctx, a->t, dest); 3266 3267 /* Install the new nullification. */ 3268 cond_free(&ctx->null_cond); 3269 if (a->c) { 3270 ctx->null_cond = do_sed_cond(a->c, dest); 3271 } 3272 return nullify_end(ctx); 3273 } 3274 3275 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a) 3276 { 3277 unsigned len = 32 - a->clen; 3278 target_sreg mask0, mask1; 3279 TCGv_reg dest; 3280 3281 if (a->c) { 3282 nullify_over(ctx); 3283 } 3284 if (a->cpos + len > 32) { 3285 len = 32 - a->cpos; 3286 } 3287 3288 dest = dest_gpr(ctx, a->t); 3289 mask0 = deposit64(0, a->cpos, len, a->i); 3290 mask1 = deposit64(-1, a->cpos, len, a->i); 3291 3292 if (a->nz) { 3293 TCGv_reg src = load_gpr(ctx, a->t); 3294 if (mask1 != -1) { 3295 tcg_gen_andi_reg(dest, src, mask1); 3296 src = dest; 3297 } 3298 tcg_gen_ori_reg(dest, src, mask0); 3299 } else { 3300 tcg_gen_movi_reg(dest, mask0); 3301 } 3302 save_gpr(ctx, a->t, dest); 3303 3304 /* Install the new nullification. */ 3305 cond_free(&ctx->null_cond); 3306 if (a->c) { 3307 ctx->null_cond = do_sed_cond(a->c, dest); 3308 } 3309 return nullify_end(ctx); 3310 } 3311 3312 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a) 3313 { 3314 unsigned rs = a->nz ? a->t : 0; 3315 unsigned len = 32 - a->clen; 3316 TCGv_reg dest, val; 3317 3318 if (a->c) { 3319 nullify_over(ctx); 3320 } 3321 if (a->cpos + len > 32) { 3322 len = 32 - a->cpos; 3323 } 3324 3325 dest = dest_gpr(ctx, a->t); 3326 val = load_gpr(ctx, a->r); 3327 if (rs == 0) { 3328 tcg_gen_deposit_z_reg(dest, val, a->cpos, len); 3329 } else { 3330 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len); 3331 } 3332 save_gpr(ctx, a->t, dest); 3333 3334 /* Install the new nullification. */ 3335 cond_free(&ctx->null_cond); 3336 if (a->c) { 3337 ctx->null_cond = do_sed_cond(a->c, dest); 3338 } 3339 return nullify_end(ctx); 3340 } 3341 3342 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c, 3343 unsigned nz, unsigned clen, TCGv_reg val) 3344 { 3345 unsigned rs = nz ? rt : 0; 3346 unsigned len = 32 - clen; 3347 TCGv_reg mask, tmp, shift, dest; 3348 unsigned msb = 1U << (len - 1); 3349 3350 if (c) { 3351 nullify_over(ctx); 3352 } 3353 3354 dest = dest_gpr(ctx, rt); 3355 shift = tcg_temp_new(); 3356 tmp = tcg_temp_new(); 3357 3358 /* Convert big-endian bit numbering in SAR to left-shift. 
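       The mask of len low bits is shifted into position together with
       the value, so the andc/or pair below merges the deposited field
       into GR[rs] without disturbing the remaining bits.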
*/ 3359 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1); 3360 3361 mask = tcg_const_reg(msb + (msb - 1)); 3362 tcg_gen_and_reg(tmp, val, mask); 3363 if (rs) { 3364 tcg_gen_shl_reg(mask, mask, shift); 3365 tcg_gen_shl_reg(tmp, tmp, shift); 3366 tcg_gen_andc_reg(dest, cpu_gr[rs], mask); 3367 tcg_gen_or_reg(dest, dest, tmp); 3368 } else { 3369 tcg_gen_shl_reg(dest, tmp, shift); 3370 } 3371 tcg_temp_free(shift); 3372 tcg_temp_free(mask); 3373 tcg_temp_free(tmp); 3374 save_gpr(ctx, rt, dest); 3375 3376 /* Install the new nullification. */ 3377 cond_free(&ctx->null_cond); 3378 if (c) { 3379 ctx->null_cond = do_sed_cond(c, dest); 3380 } 3381 return nullify_end(ctx); 3382 } 3383 3384 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a) 3385 { 3386 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r)); 3387 } 3388 3389 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a) 3390 { 3391 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i)); 3392 } 3393 3394 static bool trans_be(DisasContext *ctx, arg_be *a) 3395 { 3396 TCGv_reg tmp; 3397 3398 #ifdef CONFIG_USER_ONLY 3399 /* ??? It seems like there should be a good way of using 3400 "be disp(sr2, r0)", the canonical gateway entry mechanism 3401 to our advantage. But that appears to be inconvenient to 3402 manage along side branch delay slots. Therefore we handle 3403 entry into the gateway page via absolute address. */ 3404 /* Since we don't implement spaces, just branch. Do notice the special 3405 case of "be disp(*,r0)" using a direct branch to disp, so that we can 3406 goto_tb to the TB containing the syscall. */ 3407 if (a->b == 0) { 3408 return do_dbranch(ctx, a->disp, a->l, a->n); 3409 } 3410 #else 3411 nullify_over(ctx); 3412 #endif 3413 3414 tmp = get_temp(ctx); 3415 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp); 3416 tmp = do_ibranch_priv(ctx, tmp); 3417 3418 #ifdef CONFIG_USER_ONLY 3419 return do_ibranch(ctx, tmp, a->l, a->n); 3420 #else 3421 TCGv_i64 new_spc = tcg_temp_new_i64(); 3422 3423 load_spr(ctx, new_spc, a->sp); 3424 if (a->l) { 3425 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var); 3426 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f); 3427 } 3428 if (a->n && use_nullify_skip(ctx)) { 3429 tcg_gen_mov_reg(cpu_iaoq_f, tmp); 3430 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4); 3431 tcg_gen_mov_i64(cpu_iasq_f, new_spc); 3432 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f); 3433 } else { 3434 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 3435 if (ctx->iaoq_b == -1) { 3436 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 3437 } 3438 tcg_gen_mov_reg(cpu_iaoq_b, tmp); 3439 tcg_gen_mov_i64(cpu_iasq_b, new_spc); 3440 nullify_set(ctx, a->n); 3441 } 3442 tcg_temp_free_i64(new_spc); 3443 tcg_gen_lookup_and_goto_ptr(); 3444 ctx->base.is_jmp = DISAS_NORETURN; 3445 return nullify_end(ctx); 3446 #endif 3447 } 3448 3449 static bool trans_bl(DisasContext *ctx, arg_bl *a) 3450 { 3451 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n); 3452 } 3453 3454 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) 3455 { 3456 target_ureg dest = iaoq_dest(ctx, a->disp); 3457 3458 nullify_over(ctx); 3459 3460 /* Make sure the caller hasn't done something weird with the queue. 3461 * ??? This is not quite the same as the PSW[B] bit, which would be 3462 * expensive to track. 
Real hardware will trap for 3463 * b gateway 3464 * b gateway+4 (in delay slot of first branch) 3465 * However, checking for a non-sequential instruction queue *will* 3466 * diagnose the security hole 3467 * b gateway 3468 * b evil 3469 * in which instructions at evil would run with increased privs. 3470 */ 3471 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) { 3472 return gen_illegal(ctx); 3473 } 3474 3475 #ifndef CONFIG_USER_ONLY 3476 if (ctx->tb_flags & PSW_C) { 3477 CPUHPPAState *env = ctx->cs->env_ptr; 3478 int type = hppa_artype_for_page(env, ctx->base.pc_next); 3479 /* If we could not find a TLB entry, then we need to generate an 3480 ITLB miss exception so the kernel will provide it. 3481 The resulting TLB fill operation will invalidate this TB and 3482 we will re-translate, at which point we *will* be able to find 3483 the TLB entry and determine if this is in fact a gateway page. */ 3484 if (type < 0) { 3485 gen_excp(ctx, EXCP_ITLB_MISS); 3486 return true; 3487 } 3488 /* No change for non-gateway pages or for priv decrease. */ 3489 if (type >= 4 && type - 4 < ctx->privilege) { 3490 dest = deposit32(dest, 0, 2, type - 4); 3491 } 3492 } else { 3493 dest &= -4; /* priv = 0 */ 3494 } 3495 #endif 3496 3497 if (a->l) { 3498 TCGv_reg tmp = dest_gpr(ctx, a->l); 3499 if (ctx->privilege < 3) { 3500 tcg_gen_andi_reg(tmp, tmp, -4); 3501 } 3502 tcg_gen_ori_reg(tmp, tmp, ctx->privilege); 3503 save_gpr(ctx, a->l, tmp); 3504 } 3505 3506 return do_dbranch(ctx, dest, 0, a->n); 3507 } 3508 3509 static bool trans_blr(DisasContext *ctx, arg_blr *a) 3510 { 3511 if (a->x) { 3512 TCGv_reg tmp = get_temp(ctx); 3513 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3); 3514 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8); 3515 /* The computation here never changes privilege level. */ 3516 return do_ibranch(ctx, tmp, a->l, a->n); 3517 } else { 3518 /* BLR R0,RX is a good way to load PC+8 into RX. 
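           With x == 0 there is no computed component, so the target is
           simply IAOQ_Front + 8 and this reduces to a direct branch
           with the usual link-register update.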
*/ 3519 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n); 3520 } 3521 } 3522 3523 static bool trans_bv(DisasContext *ctx, arg_bv *a) 3524 { 3525 TCGv_reg dest; 3526 3527 if (a->x == 0) { 3528 dest = load_gpr(ctx, a->b); 3529 } else { 3530 dest = get_temp(ctx); 3531 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3); 3532 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b)); 3533 } 3534 dest = do_ibranch_priv(ctx, dest); 3535 return do_ibranch(ctx, dest, 0, a->n); 3536 } 3537 3538 static bool trans_bve(DisasContext *ctx, arg_bve *a) 3539 { 3540 TCGv_reg dest; 3541 3542 #ifdef CONFIG_USER_ONLY 3543 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); 3544 return do_ibranch(ctx, dest, a->l, a->n); 3545 #else 3546 nullify_over(ctx); 3547 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); 3548 3549 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 3550 if (ctx->iaoq_b == -1) { 3551 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 3552 } 3553 copy_iaoq_entry(cpu_iaoq_b, -1, dest); 3554 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest)); 3555 if (a->l) { 3556 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var); 3557 } 3558 nullify_set(ctx, a->n); 3559 tcg_gen_lookup_and_goto_ptr(); 3560 ctx->base.is_jmp = DISAS_NORETURN; 3561 return nullify_end(ctx); 3562 #endif 3563 } 3564 3565 /* 3566 * Float class 0 3567 */ 3568 3569 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3570 { 3571 tcg_gen_mov_i32(dst, src); 3572 } 3573 3574 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a) 3575 { 3576 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f); 3577 } 3578 3579 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3580 { 3581 tcg_gen_mov_i64(dst, src); 3582 } 3583 3584 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a) 3585 { 3586 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d); 3587 } 3588 3589 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3590 { 3591 tcg_gen_andi_i32(dst, src, INT32_MAX); 3592 } 3593 3594 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a) 3595 { 3596 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f); 3597 } 3598 3599 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3600 { 3601 tcg_gen_andi_i64(dst, src, INT64_MAX); 3602 } 3603 3604 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a) 3605 { 3606 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d); 3607 } 3608 3609 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a) 3610 { 3611 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s); 3612 } 3613 3614 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a) 3615 { 3616 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d); 3617 } 3618 3619 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a) 3620 { 3621 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s); 3622 } 3623 3624 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a) 3625 { 3626 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d); 3627 } 3628 3629 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3630 { 3631 tcg_gen_xori_i32(dst, src, INT32_MIN); 3632 } 3633 3634 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a) 3635 { 3636 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f); 3637 } 3638 3639 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3640 { 3641 tcg_gen_xori_i64(dst, src, INT64_MIN); 3642 } 3643 3644 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a) 3645 { 3646 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d); 3647 } 3648 3649 static void 
gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3650 { 3651 tcg_gen_ori_i32(dst, src, INT32_MIN); 3652 } 3653 3654 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a) 3655 { 3656 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f); 3657 } 3658 3659 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3660 { 3661 tcg_gen_ori_i64(dst, src, INT64_MIN); 3662 } 3663 3664 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a) 3665 { 3666 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d); 3667 } 3668 3669 /* 3670 * Float class 1 3671 */ 3672 3673 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a) 3674 { 3675 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s); 3676 } 3677 3678 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a) 3679 { 3680 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d); 3681 } 3682 3683 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a) 3684 { 3685 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s); 3686 } 3687 3688 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a) 3689 { 3690 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s); 3691 } 3692 3693 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a) 3694 { 3695 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d); 3696 } 3697 3698 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a) 3699 { 3700 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d); 3701 } 3702 3703 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a) 3704 { 3705 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w); 3706 } 3707 3708 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a) 3709 { 3710 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w); 3711 } 3712 3713 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a) 3714 { 3715 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw); 3716 } 3717 3718 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a) 3719 { 3720 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw); 3721 } 3722 3723 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a) 3724 { 3725 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w); 3726 } 3727 3728 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a) 3729 { 3730 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w); 3731 } 3732 3733 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a) 3734 { 3735 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw); 3736 } 3737 3738 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a) 3739 { 3740 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw); 3741 } 3742 3743 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a) 3744 { 3745 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s); 3746 } 3747 3748 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a) 3749 { 3750 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s); 3751 } 3752 3753 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a) 3754 { 3755 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d); 3756 } 3757 3758 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a) 3759 { 3760 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d); 3761 } 3762 3763 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a) 3764 { 3765 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw); 3766 } 3767 3768 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a) 3769 { 3770 return 
do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw); 3771 } 3772 3773 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a) 3774 { 3775 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw); 3776 } 3777 3778 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a) 3779 { 3780 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw); 3781 } 3782 3783 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a) 3784 { 3785 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw); 3786 } 3787 3788 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a) 3789 { 3790 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw); 3791 } 3792 3793 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a) 3794 { 3795 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw); 3796 } 3797 3798 static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a) 3799 { 3800 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw); 3801 } 3802 3803 /* 3804 * Float class 2 3805 */ 3806 3807 static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a) 3808 { 3809 TCGv_i32 ta, tb, tc, ty; 3810 3811 nullify_over(ctx); 3812 3813 ta = load_frw0_i32(a->r1); 3814 tb = load_frw0_i32(a->r2); 3815 ty = tcg_const_i32(a->y); 3816 tc = tcg_const_i32(a->c); 3817 3818 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc); 3819 3820 tcg_temp_free_i32(ta); 3821 tcg_temp_free_i32(tb); 3822 tcg_temp_free_i32(ty); 3823 tcg_temp_free_i32(tc); 3824 3825 return nullify_end(ctx); 3826 } 3827 3828 static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a) 3829 { 3830 TCGv_i64 ta, tb; 3831 TCGv_i32 tc, ty; 3832 3833 nullify_over(ctx); 3834 3835 ta = load_frd0(a->r1); 3836 tb = load_frd0(a->r2); 3837 ty = tcg_const_i32(a->y); 3838 tc = tcg_const_i32(a->c); 3839 3840 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc); 3841 3842 tcg_temp_free_i64(ta); 3843 tcg_temp_free_i64(tb); 3844 tcg_temp_free_i32(ty); 3845 tcg_temp_free_i32(tc); 3846 3847 return nullify_end(ctx); 3848 } 3849 3850 static bool trans_ftest(DisasContext *ctx, arg_ftest *a) 3851 { 3852 TCGv_reg t; 3853 3854 nullify_over(ctx); 3855 3856 t = get_temp(ctx); 3857 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow)); 3858 3859 if (a->y == 1) { 3860 int mask; 3861 bool inv = false; 3862 3863 switch (a->c) { 3864 case 0: /* simple */ 3865 tcg_gen_andi_reg(t, t, 0x4000000); 3866 ctx->null_cond = cond_make_0(TCG_COND_NE, t); 3867 goto done; 3868 case 2: /* rej */ 3869 inv = true; 3870 /* fallthru */ 3871 case 1: /* acc */ 3872 mask = 0x43ff800; 3873 break; 3874 case 6: /* rej8 */ 3875 inv = true; 3876 /* fallthru */ 3877 case 5: /* acc8 */ 3878 mask = 0x43f8000; 3879 break; 3880 case 9: /* acc6 */ 3881 mask = 0x43e0000; 3882 break; 3883 case 13: /* acc4 */ 3884 mask = 0x4380000; 3885 break; 3886 case 17: /* acc2 */ 3887 mask = 0x4200000; 3888 break; 3889 default: 3890 gen_illegal(ctx); 3891 return true; 3892 } 3893 if (inv) { 3894 TCGv_reg c = load_const(ctx, mask); 3895 tcg_gen_or_reg(t, t, c); 3896 ctx->null_cond = cond_make(TCG_COND_EQ, t, c); 3897 } else { 3898 tcg_gen_andi_reg(t, t, mask); 3899 ctx->null_cond = cond_make_0(TCG_COND_EQ, t); 3900 } 3901 } else { 3902 unsigned cbit = (a->y ^ 1) - 1; 3903 3904 tcg_gen_extract_reg(t, t, 21 - cbit, 1); 3905 ctx->null_cond = cond_make_0(TCG_COND_NE, t); 3906 tcg_temp_free(t); 3907 } 3908 3909 done: 3910 return nullify_end(ctx); 3911 } 3912 3913 /* 3914 * Float class 2 3915 */ 3916 3917 static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a) 3918 { 3919 return do_fop_weww(ctx, a->t, 
a->r1, a->r2, gen_helper_fadd_s); 3920 } 3921 3922 static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a) 3923 { 3924 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d); 3925 } 3926 3927 static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a) 3928 { 3929 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s); 3930 } 3931 3932 static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a) 3933 { 3934 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d); 3935 } 3936 3937 static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a) 3938 { 3939 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s); 3940 } 3941 3942 static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a) 3943 { 3944 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d); 3945 } 3946 3947 static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a) 3948 { 3949 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s); 3950 } 3951 3952 static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a) 3953 { 3954 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d); 3955 } 3956 3957 static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a) 3958 { 3959 TCGv_i64 x, y; 3960 3961 nullify_over(ctx); 3962 3963 x = load_frw0_i64(a->r1); 3964 y = load_frw0_i64(a->r2); 3965 tcg_gen_mul_i64(x, x, y); 3966 save_frd(a->t, x); 3967 tcg_temp_free_i64(x); 3968 tcg_temp_free_i64(y); 3969 3970 return nullify_end(ctx); 3971 } 3972 3973 /* Convert the fmpyadd single-precision register encodings to standard. */ 3974 static inline int fmpyadd_s_reg(unsigned r) 3975 { 3976 return (r & 16) * 2 + 16 + (r & 15); 3977 } 3978 3979 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub) 3980 { 3981 int tm = fmpyadd_s_reg(a->tm); 3982 int ra = fmpyadd_s_reg(a->ra); 3983 int ta = fmpyadd_s_reg(a->ta); 3984 int rm2 = fmpyadd_s_reg(a->rm2); 3985 int rm1 = fmpyadd_s_reg(a->rm1); 3986 3987 nullify_over(ctx); 3988 3989 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s); 3990 do_fop_weww(ctx, ta, ta, ra, 3991 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s); 3992 3993 return nullify_end(ctx); 3994 } 3995 3996 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a) 3997 { 3998 return do_fmpyadd_s(ctx, a, false); 3999 } 4000 4001 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a) 4002 { 4003 return do_fmpyadd_s(ctx, a, true); 4004 } 4005 4006 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub) 4007 { 4008 nullify_over(ctx); 4009 4010 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d); 4011 do_fop_dedd(ctx, a->ta, a->ta, a->ra, 4012 is_sub ? 
gen_helper_fsub_d : gen_helper_fadd_d); 4013 4014 return nullify_end(ctx); 4015 } 4016 4017 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a) 4018 { 4019 return do_fmpyadd_d(ctx, a, false); 4020 } 4021 4022 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a) 4023 { 4024 return do_fmpyadd_d(ctx, a, true); 4025 } 4026 4027 static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a) 4028 { 4029 TCGv_i32 x, y, z; 4030 4031 nullify_over(ctx); 4032 x = load_frw0_i32(a->rm1); 4033 y = load_frw0_i32(a->rm2); 4034 z = load_frw0_i32(a->ra3); 4035 4036 if (a->neg) { 4037 gen_helper_fmpynfadd_s(x, cpu_env, x, y, z); 4038 } else { 4039 gen_helper_fmpyfadd_s(x, cpu_env, x, y, z); 4040 } 4041 4042 tcg_temp_free_i32(y); 4043 tcg_temp_free_i32(z); 4044 save_frw_i32(a->t, x); 4045 tcg_temp_free_i32(x); 4046 return nullify_end(ctx); 4047 } 4048 4049 static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a) 4050 { 4051 TCGv_i64 x, y, z; 4052 4053 nullify_over(ctx); 4054 x = load_frd0(a->rm1); 4055 y = load_frd0(a->rm2); 4056 z = load_frd0(a->ra3); 4057 4058 if (a->neg) { 4059 gen_helper_fmpynfadd_d(x, cpu_env, x, y, z); 4060 } else { 4061 gen_helper_fmpyfadd_d(x, cpu_env, x, y, z); 4062 } 4063 4064 tcg_temp_free_i64(y); 4065 tcg_temp_free_i64(z); 4066 save_frd(a->t, x); 4067 tcg_temp_free_i64(x); 4068 return nullify_end(ctx); 4069 } 4070 4071 static bool trans_diag(DisasContext *ctx, arg_diag *a) 4072 { 4073 qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n"); 4074 cond_free(&ctx->null_cond); 4075 return true; 4076 } 4077 4078 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) 4079 { 4080 DisasContext *ctx = container_of(dcbase, DisasContext, base); 4081 int bound; 4082 4083 ctx->cs = cs; 4084 ctx->tb_flags = ctx->base.tb->flags; 4085 4086 #ifdef CONFIG_USER_ONLY 4087 ctx->privilege = MMU_USER_IDX; 4088 ctx->mmu_idx = MMU_USER_IDX; 4089 ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX; 4090 ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX; 4091 #else 4092 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3; 4093 ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX); 4094 4095 /* Recover the IAOQ values from the GVA + PRIV. */ 4096 uint64_t cs_base = ctx->base.tb->cs_base; 4097 uint64_t iasq_f = cs_base & ~0xffffffffull; 4098 int32_t diff = cs_base; 4099 4100 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege; 4101 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1); 4102 #endif 4103 ctx->iaoq_n = -1; 4104 ctx->iaoq_n_var = NULL; 4105 4106 /* Bound the number of instructions by those left on the page. */ 4107 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4; 4108 ctx->base.max_insns = MIN(ctx->base.max_insns, bound); 4109 4110 ctx->ntempr = 0; 4111 ctx->ntempl = 0; 4112 memset(ctx->tempr, 0, sizeof(ctx->tempr)); 4113 memset(ctx->templ, 0, sizeof(ctx->templ)); 4114 } 4115 4116 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs) 4117 { 4118 DisasContext *ctx = container_of(dcbase, DisasContext, base); 4119 4120 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. 
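       cond_make_f() starts from a never-nullify condition; if the TB
       was entered with PSW[N] set, promote it to ALWAYS so that the
       first insn of the TB is nullified.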
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                     const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    gen_excp(ctx, EXCP_DEBUG);
    ctx->base.pc_next += 4;
    return true;
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}
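/*
 * Illustrative walk-through of the queue advance above, as a rough sketch:
 * for straight-line code with iaoq_f == 0x100 and iaoq_b == 0x104, the
 * loop computes iaoq_n == 0x108 and then shifts the queue so the next
 * iteration sees iaoq_f == 0x104, iaoq_b == 0x108.  A branch instead
 * overwrites iaoq_n (or iaoq_n_var) before the shift, so the insn already
 * at the back of the queue still executes next, which is how the
 * architectural delay slot falls out of this bookkeeping.
 */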
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;
    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
}

void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}
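/*
 * Note on the data[] layout consumed above: each element corresponds to an
 * argument of the tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b) call in
 * hppa_tr_insn_start, so data[0] and data[1] carry the front and back of
 * the instruction address queue for the faulting insn; the -1 check
 * mirrors the "iaoq_b unknown at translation time" sentinel used
 * elsewhere in this file.
 */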