/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg  tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64  tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg  tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64  tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
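
/*
 * A DisasCond represents the comparison "A0 <C> A1".  Beyond ordinary
 * instruction conditions, it is used as null_cond in DisasContext for
 * the condition under which the next instruction is to be nullified;
 * there, TCG_COND_NEVER means "the next insn executes unconditionally".
 */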

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  0
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}
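
/*
 * The tri-state M produced by ma_to_m and friends is interpreted by
 * the memory helpers below: < 0 requests pre-modify, > 0 post-modify,
 * and 0 leaves the base register unchanged.
 */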

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
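
/*
 * Temporaries handed out by get_temp/get_temp_tl are recorded in the
 * tempr[]/templ[] arrays of the DisasContext so that the translation
 * loop can release them in bulk once the current instruction has been
 * fully translated.
 */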

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS
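
/*
 * Note that the 32-bit FP registers overlay halves of the 64-bit fr[]
 * array: bit 5 of the register number selects the word, and HI_OFS/
 * LO_OFS above translate that choice for the host's endianness.
 */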

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
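
/*
 * Throughout, an iaoq value of -1 means "not known at translate time";
 * the accompanying TCGv (when present) then carries the run-time value.
 */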

static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}
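
/*
 * Direct block chaining via goto_tb is only possible when both words of
 * the instruction address queue are known at translate time; otherwise
 * gen_goto_tb above falls back to an indirect lookup_and_goto_ptr exit.
 */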

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
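
/*
 * For example, with cf == 2 (signed <), "in1 - in2 < 0" is equivalent
 * to "in1 < in2", so the comparison uses the inputs directly and the
 * subtraction that produced RES may then be removed as dead code.
 */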

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
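
/*
 * This is the usual identity: signed overflow of an addition occurs
 * exactly when the operands agree in sign but the result does not.
 * Only the sign bit of SV is meaningful to the callers.
 */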

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
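
/*
 * When a condition completer is present, the insn becomes too complex
 * to nullify with a conditional move, so the wrappers below branch
 * around the whole implementation via nullify_over/nullify_end.
 */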

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
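
/*
 * Note the PSW carry/borrow convention used above: subtraction is
 * performed as IN1 + ~IN2 + 1, so CB_MSB holds the carry out, which
 * is 1 exactly when no borrow occurred.
 */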

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}
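
/*
 * For the indexed forms, a->scale reflects the ,S completer; the shift
 * amount matches the operand size, hence "a->scale ? 2 : 0" for 4-byte
 * and "a->scale ? 3 : 0" for 8-byte FP accesses in the trans_* wrappers.
 */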

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}
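
/*
 * The do_fop_* suffixes spell out the operand widths in the order of
 * the helper's arguments: 'w' for a 32-bit value, 'd' for a 64-bit
 * value, with 'e' standing for the cpu_env argument in between.
 */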

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
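/*
 * For reference, a sketch of how userland reaches do_page_zero above:
 * the Linux gateway page is entered with something like
 *
 *     ble  0x100(%sr2, %r0)      ; SYSCALL offset
 *     ldi  __NR_write, %r20      ; delay slot: syscall number
 *
 * which is why only the fixed offsets 0x0/0xb0/0xe0/0x100 are recognized.
 */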
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free(t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx);
}
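/*
 * A note on trans_mfsp/trans_mtsp above: this translator keeps each
 * space register as a 64-bit value with the 32-bit space ID in the
 * high half (convenient when composing 64-bit global virtual
 * addresses), hence the shift by 32 in both directions.
 */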
static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_reg reg;
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    reg = load_gpr(ctx, a->r);

    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(cpu_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(cpu_env);
#endif
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_reg dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}

static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_halt(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_reset(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_getshadowregs(cpu_env);
    return nullify_end(ctx);
#endif
}

static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
    if (a->m) {
        TCGv_reg dest = dest_gpr(ctx, a->b);
        TCGv_reg src1 = load_gpr(ctx, a->b);
        TCGv_reg src2 = load_gpr(ctx, a->x);

        /* The only thing we need to do is the base register modification.  */
        tcg_gen_add_reg(dest, src1, src2);
        save_gpr(ctx, a->b, dest);
    }
    cond_free(&ctx->null_cond);
    return true;
}
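/*
 * PROBE[R|W] (below) tests whether an access of the requested type
 * would be allowed at a given privilege level, and writes 1 (allowed)
 * or 0 into GR[t] rather than faulting; the helper performs the
 * actual permission check.
 */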
static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    if (a->imm) {
        level = tcg_constant_i32(a->ri);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, cpu_env, addr, level, want);

    tcg_temp_free_i32(level);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}

static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    if (a->local) {
        gen_helper_ptlbe(cpu_env);
    } else {
        gen_helper_ptlb(cpu_env, addr);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}

/*
 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
 * See
 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
 *     page 13-9 (195/206)
 */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr, atl, stl;
    TCGv_reg reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *      return gen_illegal(ctx);
     *
     * Note for future: these are 32-bit systems; no hppa64.
     */

    atl = tcg_temp_new_tl();
    stl = tcg_temp_new_tl();
    addr = tcg_temp_new_tl();

    tcg_gen_ld32u_i64(stl, cpu_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, cpu_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_tl(addr, atl, stl);
    tcg_temp_free_tl(atl);
    tcg_temp_free_tl(stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }
    tcg_temp_free_tl(addr);

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl vaddr;
    TCGv_reg ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new();
    gen_helper_lpa(paddr, cpu_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);
    tcg_temp_free(paddr);

    return nullify_end(ctx);
#endif
}

static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, tcg_constant_reg(0));

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}
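/*
 * Reading aid (assuming the do_add_reg/do_sub_reg signatures defined
 * earlier in this file): the boolean columns above appear to be
 * (is_l, is_tsv, is_tc, is_c) for the adds and (is_tsv, is_b, is_tc)
 * for the subtracts -- logical (no carry), trap on signed overflow,
 * trap on condition, carry/borrow in.
 */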
static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_reg);
}

static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_reg);
}

static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            cond_free(&ctx->null_cond);
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_reg dest = dest_gpr(ctx, rt);
                tcg_gen_movi_reg(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            cond_free(&ctx->null_cond);
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */
            nullify_over(ctx);

            /* Advance the instruction queue.  */
            copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_reg);
}

static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_reg);
}

static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
    return nullify_end(ctx);
}

static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
    return nullify_end(ctx);
}

static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2, tmp;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = get_temp(ctx);
    tcg_gen_not_reg(tmp, tcg_r2);
    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx);
}

static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, true);
}

static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
{
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
            is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
    return nullify_end(ctx);
}

static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, true);
}
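/*
 * DCOR sketch: in this representation PSW[CB] keeps the carry out of
 * each nibble at bit 4*i+3, so the shift by 3 plus the 0x11111111 mask
 * leaves a 0/1 flag per BCD digit; multiplying by 6 then yields the
 * classic decimal correction, subtracting 6 from every digit that did
 * not produce a decimal carry (or adding, for DCOR,I).
 */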
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx);
}
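/*
 * Usage note (roughly how real code uses DS above): PA-RISC 1.x has no
 * integer divide instruction; the $$divI/$$divU millicode routines
 * prime PSW[V] and then issue a chain of DS instructions, producing
 * one quotient bit per step, which is the non-restoring divide loop
 * this primitive supports.
 */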
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}

static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);

    return nullify_end(ctx);
}

static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                       a->disp, a->sp, a->m, a->size | MO_TE);
    }
}

static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
    }
}
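/*
 * LDCW (trans_ldc below) is PA-RISC's load-and-clear primitive: it
 * atomically returns the old memory word and stores zero, so a free
 * lock is conventionally nonzero and zero means "held".  The atomic
 * xchg with a constant zero below implements exactly that.
 */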
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    zero = tcg_constant_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_reg ofs, val;
    TCGv_tl addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             ctx->mmu_idx == MMU_PHYS_IDX);
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_reg(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}

static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_reg(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_reg tcg_rt = load_gpr(ctx, a->r);
    TCGv_reg tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
    if (a->b == 0) {
        tcg_gen_movi_reg(tcg_rt, a->i);
    } else {
        tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}
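/*
 * Example of the LDI pseudo-op handled above (illustrative):
 * "ldi 5,%r4" assembles as "ldo 5(%r0),%r4", i.e. b == 0, which the
 * special case turns into a simple movi.
 */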
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}

static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv, cb_msb;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new();
    sv = NULL;
    cb_msb = NULL;

    if (cond_need_cb(c)) {
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
    } else {
        tcg_gen_add_reg(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
    }

    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
    save_gpr(ctx, r, dest);
    tcg_temp_free(dest);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}

static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shli_reg(tmp, tcg_r, a->p);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
    }

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_reg(dest, a->i);

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
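/*
 * SHRPW forms a 64-bit double word from r1:r2 and extracts 32 bits
 * from it.  Two special cases below avoid the full concatenation:
 * r1 == 0 is a plain shift, and r1 == r2 degenerates into a 32-bit
 * rotate right.
 */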
static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
{
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    if (a->r1 == 0) {
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
{
    unsigned sa = 31 - a->cpos;
    TCGv_reg dest, t2;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else if (TARGET_REGISTER_BITS == 32) {
        tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        TCGv_i64 t64 = tcg_temp_new_i64();
        tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
        tcg_gen_shri_i64(t64, t64, sa);
        tcg_gen_trunc_i64_reg(dest, t64);
        tcg_temp_free_i64(t64);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
{
    unsigned len = 32 - a->clen;
    TCGv_reg dest, src, tmp;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (a->se) {
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
{
    unsigned len = 32 - a->clen;
    unsigned cpos = 31 - a->pos;
    TCGv_reg dest, src;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
{
    unsigned len = 32 - a->clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_reg src = load_gpr(ctx, a->t);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len = 32 - a->clen;
    TCGv_reg dest, val;

    if (a->c) {
        nullify_over(ctx);
    }
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
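/*
 * Worked example of the big-endian bit numbering handled here
 * (illustrative): PA-RISC counts bit 0 as the most significant bit,
 * so a field "ending at bit p" starts at little-endian position
 * 31 - p, and a SAR value s corresponds to a TCG shift amount of
 * (TARGET_REGISTER_BITS - 1) - s -- the xori above and below.
 */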
static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
                        unsigned nz, unsigned clen, TCGv_reg val)
{
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    mask = tcg_const_reg(msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
}

static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
}

static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage alongside branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_temp_free_i64(new_spc);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}
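/*
 * B,GATE is the mechanism by which user code raises its privilege:
 * a branch that lands on a gateway page may lower the privilege
 * number (page types 4..7 map to levels 0..3 below).  For example,
 * a type-4 gateway page entered from privilege 3 deposits 0 into
 * the low two bits of dest, i.e. full promotion to level 0.
 */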
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
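/*
 * The computed branches below scale their index register by 8
 * (shli 3) because PA-RISC branch tables hold two-instruction
 * entries: a branch plus its delay slot.  BV with x == 0 is also the
 * conventional procedure return, "bv %r0(%rp)".
 */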
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

/*
 * Float class 0
 */
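/*
 * Note: the sign-manipulation ops in this class (FCPY, FABS, FNEG,
 * FNEGABS) are pure IEEE sign-bit operations, so they are implemented
 * inline with and/or/xor against INT32/64_MIN/MAX rather than via
 * softfloat helpers, and need not consult the FPU status.
 */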
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    nullify_over(ctx);
#if TARGET_REGISTER_BITS == 64
    save_frd(0, tcg_const_i64(0x13080000000000ULL)); /* PA8700 (PCX-W2) */
#else
    save_frd(0, tcg_const_i64(0x0f080000000000ULL)); /* PA7300LC (PCX-L2) */
#endif
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}
static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);

    return nullify_end(ctx);
}
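/*
 * FCMP above does not write a general register: it deposits its result
 * into the FPSR compare bit/queue, tracked here in fr0_shadow; FTEST
 * below then turns those bits (e.g. the C bit, the 0x4000000 mask)
 * into a nullification condition.
 */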
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
        tcg_temp_free(t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    tcg_temp_free_i64(y);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
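/*
 * Worked example for fmpyadd_s_reg above: r = 5 maps to 21 and
 * r = 20 maps to 52, spreading the compressed 5-bit encodings across
 * the 16..31 and 48..63 index ranges this translator uses for
 * single-precision operands.
 */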
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i32(y);
    tcg_temp_free_i32(z);
    save_frw_i32(a->t, x);
    tcg_temp_free_i32(x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i64(y);
    tcg_temp_free_i64(z);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
    cond_free(&ctx->null_cond);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
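    /*
     * A sketch of the packing assumed here, mirroring what the hppa
     * cpu_get_tb_cpu_state hook stores: the high 32 bits of cs_base
     * carry the front space register (IASQ_F), and the low 32 bits
     * carry iaoq_b - iaoq_f when that difference fits in an int32_t.
     * A zero diff means the offset was not representable, so iaoq_b
     * is treated as unknown (-1) below.
     */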
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
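        /*
         * Note the convention used throughout this queue handling: an
         * iaoq_* value of -1 is a sentinel for "not known at translation
         * time", in which case the live value is carried in a TCG
         * temporary (here iaoq_n_var) instead.
         */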
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}