1 /* 2 * HPPA emulation cpu translation for qemu. 3 * 4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net> 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "cpu.h" 22 #include "disas/disas.h" 23 #include "qemu/host-utils.h" 24 #include "exec/exec-all.h" 25 #include "tcg/tcg-op.h" 26 #include "exec/helper-proto.h" 27 #include "exec/helper-gen.h" 28 #include "exec/translator.h" 29 #include "exec/log.h" 30 31 #define HELPER_H "helper.h" 32 #include "exec/helper-info.c.inc" 33 #undef HELPER_H 34 35 36 /* Since we have a distinction between register size and address size, 37 we need to redefine all of these. */ 38 39 #undef TCGv 40 #undef tcg_temp_new 41 #undef tcg_global_mem_new 42 43 #if TARGET_LONG_BITS == 64 44 #define TCGv_tl TCGv_i64 45 #define tcg_temp_new_tl tcg_temp_new_i64 46 #if TARGET_REGISTER_BITS == 64 47 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64 48 #else 49 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64 50 #endif 51 #else 52 #define TCGv_tl TCGv_i32 53 #define tcg_temp_new_tl tcg_temp_new_i32 54 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32 55 #endif 56 57 #if TARGET_REGISTER_BITS == 64 58 #define TCGv_reg TCGv_i64 59 60 #define tcg_temp_new tcg_temp_new_i64 61 #define tcg_global_mem_new tcg_global_mem_new_i64 62 63 #define tcg_gen_movi_reg tcg_gen_movi_i64 64 #define tcg_gen_mov_reg tcg_gen_mov_i64 65 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64 66 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64 67 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64 68 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64 69 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64 70 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64 71 #define tcg_gen_ld_reg tcg_gen_ld_i64 72 #define tcg_gen_st8_reg tcg_gen_st8_i64 73 #define tcg_gen_st16_reg tcg_gen_st16_i64 74 #define tcg_gen_st32_reg tcg_gen_st32_i64 75 #define tcg_gen_st_reg tcg_gen_st_i64 76 #define tcg_gen_add_reg tcg_gen_add_i64 77 #define tcg_gen_addi_reg tcg_gen_addi_i64 78 #define tcg_gen_sub_reg tcg_gen_sub_i64 79 #define tcg_gen_neg_reg tcg_gen_neg_i64 80 #define tcg_gen_subfi_reg tcg_gen_subfi_i64 81 #define tcg_gen_subi_reg tcg_gen_subi_i64 82 #define tcg_gen_and_reg tcg_gen_and_i64 83 #define tcg_gen_andi_reg tcg_gen_andi_i64 84 #define tcg_gen_or_reg tcg_gen_or_i64 85 #define tcg_gen_ori_reg tcg_gen_ori_i64 86 #define tcg_gen_xor_reg tcg_gen_xor_i64 87 #define tcg_gen_xori_reg tcg_gen_xori_i64 88 #define tcg_gen_not_reg tcg_gen_not_i64 89 #define tcg_gen_shl_reg tcg_gen_shl_i64 90 #define tcg_gen_shli_reg tcg_gen_shli_i64 91 #define tcg_gen_shr_reg tcg_gen_shr_i64 92 #define tcg_gen_shri_reg tcg_gen_shri_i64 93 #define tcg_gen_sar_reg tcg_gen_sar_i64 94 #define tcg_gen_sari_reg tcg_gen_sari_i64 95 #define tcg_gen_brcond_reg tcg_gen_brcond_i64 96 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64 97 #define tcg_gen_setcond_reg tcg_gen_setcond_i64 98 #define tcg_gen_setcondi_reg 
tcg_gen_setcondi_i64 99 #define tcg_gen_mul_reg tcg_gen_mul_i64 100 #define tcg_gen_muli_reg tcg_gen_muli_i64 101 #define tcg_gen_div_reg tcg_gen_div_i64 102 #define tcg_gen_rem_reg tcg_gen_rem_i64 103 #define tcg_gen_divu_reg tcg_gen_divu_i64 104 #define tcg_gen_remu_reg tcg_gen_remu_i64 105 #define tcg_gen_discard_reg tcg_gen_discard_i64 106 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32 107 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64 108 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64 109 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64 110 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64 111 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64 112 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64 113 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64 114 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64 115 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64 116 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64 117 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64 118 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64 119 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64 120 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64 121 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64 122 #define tcg_gen_andc_reg tcg_gen_andc_i64 123 #define tcg_gen_eqv_reg tcg_gen_eqv_i64 124 #define tcg_gen_nand_reg tcg_gen_nand_i64 125 #define tcg_gen_nor_reg tcg_gen_nor_i64 126 #define tcg_gen_orc_reg tcg_gen_orc_i64 127 #define tcg_gen_clz_reg tcg_gen_clz_i64 128 #define tcg_gen_ctz_reg tcg_gen_ctz_i64 129 #define tcg_gen_clzi_reg tcg_gen_clzi_i64 130 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64 131 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64 132 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64 133 #define tcg_gen_rotl_reg tcg_gen_rotl_i64 134 #define tcg_gen_rotli_reg tcg_gen_rotli_i64 135 #define tcg_gen_rotr_reg tcg_gen_rotr_i64 136 #define tcg_gen_rotri_reg tcg_gen_rotri_i64 137 #define tcg_gen_deposit_reg tcg_gen_deposit_i64 138 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64 139 #define tcg_gen_extract_reg tcg_gen_extract_i64 140 #define tcg_gen_sextract_reg tcg_gen_sextract_i64 141 #define tcg_gen_extract2_reg tcg_gen_extract2_i64 142 #define tcg_constant_reg tcg_constant_i64 143 #define tcg_gen_movcond_reg tcg_gen_movcond_i64 144 #define tcg_gen_add2_reg tcg_gen_add2_i64 145 #define tcg_gen_sub2_reg tcg_gen_sub2_i64 146 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64 147 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64 148 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64 149 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr 150 #else 151 #define TCGv_reg TCGv_i32 152 #define tcg_temp_new tcg_temp_new_i32 153 #define tcg_global_mem_new tcg_global_mem_new_i32 154 155 #define tcg_gen_movi_reg tcg_gen_movi_i32 156 #define tcg_gen_mov_reg tcg_gen_mov_i32 157 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32 158 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32 159 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32 160 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32 161 #define tcg_gen_ld32u_reg tcg_gen_ld_i32 162 #define tcg_gen_ld32s_reg tcg_gen_ld_i32 163 #define tcg_gen_ld_reg tcg_gen_ld_i32 164 #define tcg_gen_st8_reg tcg_gen_st8_i32 165 #define tcg_gen_st16_reg tcg_gen_st16_i32 166 #define tcg_gen_st32_reg tcg_gen_st32_i32 167 #define tcg_gen_st_reg tcg_gen_st_i32 168 #define tcg_gen_add_reg tcg_gen_add_i32 169 #define tcg_gen_addi_reg tcg_gen_addi_i32 170 #define tcg_gen_sub_reg tcg_gen_sub_i32 171 #define tcg_gen_neg_reg tcg_gen_neg_i32 172 #define tcg_gen_subfi_reg tcg_gen_subfi_i32 173 #define tcg_gen_subi_reg tcg_gen_subi_i32 174 #define tcg_gen_and_reg 
tcg_gen_and_i32 175 #define tcg_gen_andi_reg tcg_gen_andi_i32 176 #define tcg_gen_or_reg tcg_gen_or_i32 177 #define tcg_gen_ori_reg tcg_gen_ori_i32 178 #define tcg_gen_xor_reg tcg_gen_xor_i32 179 #define tcg_gen_xori_reg tcg_gen_xori_i32 180 #define tcg_gen_not_reg tcg_gen_not_i32 181 #define tcg_gen_shl_reg tcg_gen_shl_i32 182 #define tcg_gen_shli_reg tcg_gen_shli_i32 183 #define tcg_gen_shr_reg tcg_gen_shr_i32 184 #define tcg_gen_shri_reg tcg_gen_shri_i32 185 #define tcg_gen_sar_reg tcg_gen_sar_i32 186 #define tcg_gen_sari_reg tcg_gen_sari_i32 187 #define tcg_gen_brcond_reg tcg_gen_brcond_i32 188 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32 189 #define tcg_gen_setcond_reg tcg_gen_setcond_i32 190 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32 191 #define tcg_gen_mul_reg tcg_gen_mul_i32 192 #define tcg_gen_muli_reg tcg_gen_muli_i32 193 #define tcg_gen_div_reg tcg_gen_div_i32 194 #define tcg_gen_rem_reg tcg_gen_rem_i32 195 #define tcg_gen_divu_reg tcg_gen_divu_i32 196 #define tcg_gen_remu_reg tcg_gen_remu_i32 197 #define tcg_gen_discard_reg tcg_gen_discard_i32 198 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32 199 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32 200 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32 201 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32 202 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64 203 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64 204 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32 205 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32 206 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32 207 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32 208 #define tcg_gen_ext32u_reg tcg_gen_mov_i32 209 #define tcg_gen_ext32s_reg tcg_gen_mov_i32 210 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32 211 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32 212 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64 213 #define tcg_gen_andc_reg tcg_gen_andc_i32 214 #define tcg_gen_eqv_reg tcg_gen_eqv_i32 215 #define tcg_gen_nand_reg tcg_gen_nand_i32 216 #define tcg_gen_nor_reg tcg_gen_nor_i32 217 #define tcg_gen_orc_reg tcg_gen_orc_i32 218 #define tcg_gen_clz_reg tcg_gen_clz_i32 219 #define tcg_gen_ctz_reg tcg_gen_ctz_i32 220 #define tcg_gen_clzi_reg tcg_gen_clzi_i32 221 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32 222 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32 223 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32 224 #define tcg_gen_rotl_reg tcg_gen_rotl_i32 225 #define tcg_gen_rotli_reg tcg_gen_rotli_i32 226 #define tcg_gen_rotr_reg tcg_gen_rotr_i32 227 #define tcg_gen_rotri_reg tcg_gen_rotri_i32 228 #define tcg_gen_deposit_reg tcg_gen_deposit_i32 229 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32 230 #define tcg_gen_extract_reg tcg_gen_extract_i32 231 #define tcg_gen_sextract_reg tcg_gen_sextract_i32 232 #define tcg_gen_extract2_reg tcg_gen_extract2_i32 233 #define tcg_constant_reg tcg_constant_i32 234 #define tcg_gen_movcond_reg tcg_gen_movcond_i32 235 #define tcg_gen_add2_reg tcg_gen_add2_i32 236 #define tcg_gen_sub2_reg tcg_gen_sub2_i32 237 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32 238 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32 239 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32 240 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr 241 #endif /* TARGET_REGISTER_BITS */ 242 243 typedef struct DisasCond { 244 TCGCond c; 245 TCGv_reg a0, a1; 246 } DisasCond; 247 248 typedef struct DisasContext { 249 DisasContextBase base; 250 CPUState *cs; 251 252 target_ureg iaoq_f; 253 target_ureg iaoq_b; 254 target_ureg iaoq_n; 255 TCGv_reg iaoq_n_var; 256 257 DisasCond null_cond; 258 
TCGLabel *null_lab; 259 260 uint32_t insn; 261 uint32_t tb_flags; 262 int mmu_idx; 263 int privilege; 264 bool psw_n_nonzero; 265 bool is_pa20; 266 267 #ifdef CONFIG_USER_ONLY 268 MemOp unalign; 269 #endif 270 } DisasContext; 271 272 #ifdef CONFIG_USER_ONLY 273 #define UNALIGN(C) (C)->unalign 274 #else 275 #define UNALIGN(C) MO_ALIGN 276 #endif 277 278 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */ 279 static int expand_sm_imm(DisasContext *ctx, int val) 280 { 281 if (val & PSW_SM_E) { 282 val = (val & ~PSW_SM_E) | PSW_E; 283 } 284 if (val & PSW_SM_W) { 285 val = (val & ~PSW_SM_W) | PSW_W; 286 } 287 return val; 288 } 289 290 /* Inverted space register indicates 0 means sr0 not inferred from base. */ 291 static int expand_sr3x(DisasContext *ctx, int val) 292 { 293 return ~val; 294 } 295 296 /* Convert the M:A bits within a memory insn to the tri-state value 297 we use for the final M. */ 298 static int ma_to_m(DisasContext *ctx, int val) 299 { 300 return val & 2 ? (val & 1 ? -1 : 1) : 0; 301 } 302 303 /* Convert the sign of the displacement to a pre or post-modify. */ 304 static int pos_to_m(DisasContext *ctx, int val) 305 { 306 return val ? 1 : -1; 307 } 308 309 static int neg_to_m(DisasContext *ctx, int val) 310 { 311 return val ? -1 : 1; 312 } 313 314 /* Used for branch targets and fp memory ops. */ 315 static int expand_shl2(DisasContext *ctx, int val) 316 { 317 return val << 2; 318 } 319 320 /* Used for fp memory ops. */ 321 static int expand_shl3(DisasContext *ctx, int val) 322 { 323 return val << 3; 324 } 325 326 /* Used for assemble_21. */ 327 static int expand_shl11(DisasContext *ctx, int val) 328 { 329 return val << 11; 330 } 331 332 static int assemble_6(DisasContext *ctx, int val) 333 { 334 /* 335 * Officially, 32 * x + 32 - y. 336 * Here, x is already in bit 5, and y is [4:0]. 337 * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1, 338 * with the overflow from bit 4 summing with x. 339 */ 340 return (val ^ 31) + 1; 341 } 342 343 /* Translate CMPI doubleword conditions to standard. */ 344 static int cmpbid_c(DisasContext *ctx, int val) 345 { 346 return val ? val : 4; /* 0 == "*<<" */ 347 } 348 349 350 /* Include the auto-generated decoder. */ 351 #include "decode-insns.c.inc" 352 353 /* We are not using a goto_tb (for whatever reason), but have updated 354 the iaq (for whatever reason), so don't do it again on exit. */ 355 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0 356 357 /* We are exiting the TB, but have neither emitted a goto_tb, nor 358 updated the iaq for the next instruction to be executed. */ 359 #define DISAS_IAQ_N_STALE DISAS_TARGET_1 360 361 /* Similarly, but we want to return to the main loop immediately 362 to recognize unmasked interrupts. 
*/ 363 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2 364 #define DISAS_EXIT DISAS_TARGET_3 365 366 /* global register indexes */ 367 static TCGv_reg cpu_gr[32]; 368 static TCGv_i64 cpu_sr[4]; 369 static TCGv_i64 cpu_srH; 370 static TCGv_reg cpu_iaoq_f; 371 static TCGv_reg cpu_iaoq_b; 372 static TCGv_i64 cpu_iasq_f; 373 static TCGv_i64 cpu_iasq_b; 374 static TCGv_reg cpu_sar; 375 static TCGv_reg cpu_psw_n; 376 static TCGv_reg cpu_psw_v; 377 static TCGv_reg cpu_psw_cb; 378 static TCGv_reg cpu_psw_cb_msb; 379 380 void hppa_translate_init(void) 381 { 382 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) } 383 384 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar; 385 static const GlobalVar vars[] = { 386 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) }, 387 DEF_VAR(psw_n), 388 DEF_VAR(psw_v), 389 DEF_VAR(psw_cb), 390 DEF_VAR(psw_cb_msb), 391 DEF_VAR(iaoq_f), 392 DEF_VAR(iaoq_b), 393 }; 394 395 #undef DEF_VAR 396 397 /* Use the symbolic register names that match the disassembler. */ 398 static const char gr_names[32][4] = { 399 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 400 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 401 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 402 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" 403 }; 404 /* SR[4-7] are not global registers so that we can index them. */ 405 static const char sr_names[5][4] = { 406 "sr0", "sr1", "sr2", "sr3", "srH" 407 }; 408 409 int i; 410 411 cpu_gr[0] = NULL; 412 for (i = 1; i < 32; i++) { 413 cpu_gr[i] = tcg_global_mem_new(tcg_env, 414 offsetof(CPUHPPAState, gr[i]), 415 gr_names[i]); 416 } 417 for (i = 0; i < 4; i++) { 418 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env, 419 offsetof(CPUHPPAState, sr[i]), 420 sr_names[i]); 421 } 422 cpu_srH = tcg_global_mem_new_i64(tcg_env, 423 offsetof(CPUHPPAState, sr[4]), 424 sr_names[4]); 425 426 for (i = 0; i < ARRAY_SIZE(vars); ++i) { 427 const GlobalVar *v = &vars[i]; 428 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name); 429 } 430 431 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env, 432 offsetof(CPUHPPAState, iasq_f), 433 "iasq_f"); 434 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env, 435 offsetof(CPUHPPAState, iasq_b), 436 "iasq_b"); 437 } 438 439 static DisasCond cond_make_f(void) 440 { 441 return (DisasCond){ 442 .c = TCG_COND_NEVER, 443 .a0 = NULL, 444 .a1 = NULL, 445 }; 446 } 447 448 static DisasCond cond_make_t(void) 449 { 450 return (DisasCond){ 451 .c = TCG_COND_ALWAYS, 452 .a0 = NULL, 453 .a1 = NULL, 454 }; 455 } 456 457 static DisasCond cond_make_n(void) 458 { 459 return (DisasCond){ 460 .c = TCG_COND_NE, 461 .a0 = cpu_psw_n, 462 .a1 = tcg_constant_reg(0) 463 }; 464 } 465 466 static DisasCond cond_make_tmp(TCGCond c, TCGv_reg a0, TCGv_reg a1) 467 { 468 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS); 469 return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 }; 470 } 471 472 static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0) 473 { 474 return cond_make_tmp(c, a0, tcg_constant_reg(0)); 475 } 476 477 static DisasCond cond_make_0(TCGCond c, TCGv_reg a0) 478 { 479 TCGv_reg tmp = tcg_temp_new(); 480 tcg_gen_mov_reg(tmp, a0); 481 return cond_make_0_tmp(c, tmp); 482 } 483 484 static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1) 485 { 486 TCGv_reg t0 = tcg_temp_new(); 487 TCGv_reg t1 = tcg_temp_new(); 488 489 tcg_gen_mov_reg(t0, a0); 490 tcg_gen_mov_reg(t1, a1); 491 return cond_make_tmp(c, t0, t1); 492 } 493 494 static void cond_free(DisasCond *cond) 495 { 496 switch (cond->c) { 497 default: 498 cond->a0 = NULL; 499 
cond->a1 = NULL; 500 /* fallthru */ 501 case TCG_COND_ALWAYS: 502 cond->c = TCG_COND_NEVER; 503 break; 504 case TCG_COND_NEVER: 505 break; 506 } 507 } 508 509 static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg) 510 { 511 if (reg == 0) { 512 TCGv_reg t = tcg_temp_new(); 513 tcg_gen_movi_reg(t, 0); 514 return t; 515 } else { 516 return cpu_gr[reg]; 517 } 518 } 519 520 static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg) 521 { 522 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) { 523 return tcg_temp_new(); 524 } else { 525 return cpu_gr[reg]; 526 } 527 } 528 529 static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t) 530 { 531 if (ctx->null_cond.c != TCG_COND_NEVER) { 532 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0, 533 ctx->null_cond.a1, dest, t); 534 } else { 535 tcg_gen_mov_reg(dest, t); 536 } 537 } 538 539 static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t) 540 { 541 if (reg != 0) { 542 save_or_nullify(ctx, cpu_gr[reg], t); 543 } 544 } 545 546 #if HOST_BIG_ENDIAN 547 # define HI_OFS 0 548 # define LO_OFS 4 549 #else 550 # define HI_OFS 4 551 # define LO_OFS 0 552 #endif 553 554 static TCGv_i32 load_frw_i32(unsigned rt) 555 { 556 TCGv_i32 ret = tcg_temp_new_i32(); 557 tcg_gen_ld_i32(ret, tcg_env, 558 offsetof(CPUHPPAState, fr[rt & 31]) 559 + (rt & 32 ? LO_OFS : HI_OFS)); 560 return ret; 561 } 562 563 static TCGv_i32 load_frw0_i32(unsigned rt) 564 { 565 if (rt == 0) { 566 TCGv_i32 ret = tcg_temp_new_i32(); 567 tcg_gen_movi_i32(ret, 0); 568 return ret; 569 } else { 570 return load_frw_i32(rt); 571 } 572 } 573 574 static TCGv_i64 load_frw0_i64(unsigned rt) 575 { 576 TCGv_i64 ret = tcg_temp_new_i64(); 577 if (rt == 0) { 578 tcg_gen_movi_i64(ret, 0); 579 } else { 580 tcg_gen_ld32u_i64(ret, tcg_env, 581 offsetof(CPUHPPAState, fr[rt & 31]) 582 + (rt & 32 ? LO_OFS : HI_OFS)); 583 } 584 return ret; 585 } 586 587 static void save_frw_i32(unsigned rt, TCGv_i32 val) 588 { 589 tcg_gen_st_i32(val, tcg_env, 590 offsetof(CPUHPPAState, fr[rt & 31]) 591 + (rt & 32 ? LO_OFS : HI_OFS)); 592 } 593 594 #undef HI_OFS 595 #undef LO_OFS 596 597 static TCGv_i64 load_frd(unsigned rt) 598 { 599 TCGv_i64 ret = tcg_temp_new_i64(); 600 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt])); 601 return ret; 602 } 603 604 static TCGv_i64 load_frd0(unsigned rt) 605 { 606 if (rt == 0) { 607 TCGv_i64 ret = tcg_temp_new_i64(); 608 tcg_gen_movi_i64(ret, 0); 609 return ret; 610 } else { 611 return load_frd(rt); 612 } 613 } 614 615 static void save_frd(unsigned rt, TCGv_i64 val) 616 { 617 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt])); 618 } 619 620 static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg) 621 { 622 #ifdef CONFIG_USER_ONLY 623 tcg_gen_movi_i64(dest, 0); 624 #else 625 if (reg < 4) { 626 tcg_gen_mov_i64(dest, cpu_sr[reg]); 627 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) { 628 tcg_gen_mov_i64(dest, cpu_srH); 629 } else { 630 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg])); 631 } 632 #endif 633 } 634 635 /* Skip over the implementation of an insn that has been nullified. 636 Use this when the insn is too complex for a conditional move. */ 637 static void nullify_over(DisasContext *ctx) 638 { 639 if (ctx->null_cond.c != TCG_COND_NEVER) { 640 /* The always condition should have been handled in the main loop. */ 641 assert(ctx->null_cond.c != TCG_COND_ALWAYS); 642 643 ctx->null_lab = gen_new_label(); 644 645 /* If we're using PSW[N], copy it to a temp because... 
*/ 646 if (ctx->null_cond.a0 == cpu_psw_n) { 647 ctx->null_cond.a0 = tcg_temp_new(); 648 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n); 649 } 650 /* ... we clear it before branching over the implementation, 651 so that (1) it's clear after nullifying this insn and 652 (2) if this insn nullifies the next, PSW[N] is valid. */ 653 if (ctx->psw_n_nonzero) { 654 ctx->psw_n_nonzero = false; 655 tcg_gen_movi_reg(cpu_psw_n, 0); 656 } 657 658 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0, 659 ctx->null_cond.a1, ctx->null_lab); 660 cond_free(&ctx->null_cond); 661 } 662 } 663 664 /* Save the current nullification state to PSW[N]. */ 665 static void nullify_save(DisasContext *ctx) 666 { 667 if (ctx->null_cond.c == TCG_COND_NEVER) { 668 if (ctx->psw_n_nonzero) { 669 tcg_gen_movi_reg(cpu_psw_n, 0); 670 } 671 return; 672 } 673 if (ctx->null_cond.a0 != cpu_psw_n) { 674 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n, 675 ctx->null_cond.a0, ctx->null_cond.a1); 676 ctx->psw_n_nonzero = true; 677 } 678 cond_free(&ctx->null_cond); 679 } 680 681 /* Set a PSW[N] to X. The intention is that this is used immediately 682 before a goto_tb/exit_tb, so that there is no fallthru path to other 683 code within the TB. Therefore we do not update psw_n_nonzero. */ 684 static void nullify_set(DisasContext *ctx, bool x) 685 { 686 if (ctx->psw_n_nonzero || x) { 687 tcg_gen_movi_reg(cpu_psw_n, x); 688 } 689 } 690 691 /* Mark the end of an instruction that may have been nullified. 692 This is the pair to nullify_over. Always returns true so that 693 it may be tail-called from a translate function. */ 694 static bool nullify_end(DisasContext *ctx) 695 { 696 TCGLabel *null_lab = ctx->null_lab; 697 DisasJumpType status = ctx->base.is_jmp; 698 699 /* For NEXT, NORETURN, STALE, we can easily continue (or exit). 700 For UPDATED, we cannot update on the nullified path. */ 701 assert(status != DISAS_IAQ_N_UPDATED); 702 703 if (likely(null_lab == NULL)) { 704 /* The current insn wasn't conditional or handled the condition 705 applied to it without a branch, so the (new) setting of 706 NULL_COND can be applied directly to the next insn. */ 707 return true; 708 } 709 ctx->null_lab = NULL; 710 711 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) { 712 /* The next instruction will be unconditional, 713 and NULL_COND already reflects that. */ 714 gen_set_label(null_lab); 715 } else { 716 /* The insn that we just executed is itself nullifying the next 717 instruction. Store the condition in the PSW[N] global. 718 We asserted PSW[N] = 0 in nullify_over, so that after the 719 label we have the proper value in place. */ 720 nullify_save(ctx); 721 gen_set_label(null_lab); 722 ctx->null_cond = cond_make_n(); 723 } 724 if (status == DISAS_NORETURN) { 725 ctx->base.is_jmp = DISAS_NEXT; 726 } 727 return true; 728 } 729 730 static target_ureg gva_offset_mask(DisasContext *ctx) 731 { 732 return (ctx->tb_flags & PSW_W 733 ? MAKE_64BIT_MASK(0, 62) 734 : MAKE_64BIT_MASK(0, 32)); 735 } 736 737 static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest, 738 target_ureg ival, TCGv_reg vval) 739 { 740 target_ureg mask = gva_offset_mask(ctx); 741 742 if (ival != -1) { 743 tcg_gen_movi_reg(dest, ival & mask); 744 return; 745 } 746 tcg_debug_assert(vval != NULL); 747 748 /* 749 * We know that the IAOQ is already properly masked. 750 * This optimization is primarily for "iaoq_f = iaoq_b". 
751 */ 752 if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) { 753 tcg_gen_mov_reg(dest, vval); 754 } else { 755 tcg_gen_andi_reg(dest, vval, mask); 756 } 757 } 758 759 static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp) 760 { 761 return ctx->iaoq_f + disp + 8; 762 } 763 764 static void gen_excp_1(int exception) 765 { 766 gen_helper_excp(tcg_env, tcg_constant_i32(exception)); 767 } 768 769 static void gen_excp(DisasContext *ctx, int exception) 770 { 771 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f); 772 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b); 773 nullify_save(ctx); 774 gen_excp_1(exception); 775 ctx->base.is_jmp = DISAS_NORETURN; 776 } 777 778 static bool gen_excp_iir(DisasContext *ctx, int exc) 779 { 780 nullify_over(ctx); 781 tcg_gen_st_reg(tcg_constant_reg(ctx->insn), 782 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR])); 783 gen_excp(ctx, exc); 784 return nullify_end(ctx); 785 } 786 787 static bool gen_illegal(DisasContext *ctx) 788 { 789 return gen_excp_iir(ctx, EXCP_ILL); 790 } 791 792 #ifdef CONFIG_USER_ONLY 793 #define CHECK_MOST_PRIVILEGED(EXCP) \ 794 return gen_excp_iir(ctx, EXCP) 795 #else 796 #define CHECK_MOST_PRIVILEGED(EXCP) \ 797 do { \ 798 if (ctx->privilege != 0) { \ 799 return gen_excp_iir(ctx, EXCP); \ 800 } \ 801 } while (0) 802 #endif 803 804 static bool use_goto_tb(DisasContext *ctx, target_ureg dest) 805 { 806 return translator_use_goto_tb(&ctx->base, dest); 807 } 808 809 /* If the next insn is to be nullified, and it's on the same page, 810 and we're not attempting to set a breakpoint on it, then we can 811 totally skip the nullified insn. This avoids creating and 812 executing a TB that merely branches to the next TB. */ 813 static bool use_nullify_skip(DisasContext *ctx) 814 { 815 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0 816 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY)); 817 } 818 819 static void gen_goto_tb(DisasContext *ctx, int which, 820 target_ureg f, target_ureg b) 821 { 822 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) { 823 tcg_gen_goto_tb(which); 824 copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL); 825 copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL); 826 tcg_gen_exit_tb(ctx->base.tb, which); 827 } else { 828 copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b); 829 copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var); 830 tcg_gen_lookup_and_goto_ptr(); 831 } 832 } 833 834 static bool cond_need_sv(int c) 835 { 836 return c == 2 || c == 3 || c == 6; 837 } 838 839 static bool cond_need_cb(int c) 840 { 841 return c == 4 || c == 5; 842 } 843 844 /* Need extensions from TCGv_i32 to TCGv_reg. */ 845 static bool cond_need_ext(DisasContext *ctx, bool d) 846 { 847 return TARGET_REGISTER_BITS == 64 && !(ctx->is_pa20 && d); 848 } 849 850 /* 851 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of 852 * the Parisc 1.1 Architecture Reference Manual for details. 
853 */ 854 855 static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d, 856 TCGv_reg res, TCGv_reg cb_msb, TCGv_reg sv) 857 { 858 DisasCond cond; 859 TCGv_reg tmp; 860 861 switch (cf >> 1) { 862 case 0: /* Never / TR (0 / 1) */ 863 cond = cond_make_f(); 864 break; 865 case 1: /* = / <> (Z / !Z) */ 866 if (cond_need_ext(ctx, d)) { 867 tmp = tcg_temp_new(); 868 tcg_gen_ext32u_reg(tmp, res); 869 res = tmp; 870 } 871 cond = cond_make_0(TCG_COND_EQ, res); 872 break; 873 case 2: /* < / >= (N ^ V / !(N ^ V) */ 874 tmp = tcg_temp_new(); 875 tcg_gen_xor_reg(tmp, res, sv); 876 if (cond_need_ext(ctx, d)) { 877 tcg_gen_ext32s_reg(tmp, tmp); 878 } 879 cond = cond_make_0_tmp(TCG_COND_LT, tmp); 880 break; 881 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */ 882 /* 883 * Simplify: 884 * (N ^ V) | Z 885 * ((res < 0) ^ (sv < 0)) | !res 886 * ((res ^ sv) < 0) | !res 887 * (~(res ^ sv) >= 0) | !res 888 * !(~(res ^ sv) >> 31) | !res 889 * !(~(res ^ sv) >> 31 & res) 890 */ 891 tmp = tcg_temp_new(); 892 tcg_gen_eqv_reg(tmp, res, sv); 893 if (cond_need_ext(ctx, d)) { 894 tcg_gen_sextract_reg(tmp, tmp, 31, 1); 895 tcg_gen_and_reg(tmp, tmp, res); 896 tcg_gen_ext32u_reg(tmp, tmp); 897 } else { 898 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1); 899 tcg_gen_and_reg(tmp, tmp, res); 900 } 901 cond = cond_make_0_tmp(TCG_COND_EQ, tmp); 902 break; 903 case 4: /* NUV / UV (!C / C) */ 904 /* Only bit 0 of cb_msb is ever set. */ 905 cond = cond_make_0(TCG_COND_EQ, cb_msb); 906 break; 907 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */ 908 tmp = tcg_temp_new(); 909 tcg_gen_neg_reg(tmp, cb_msb); 910 tcg_gen_and_reg(tmp, tmp, res); 911 if (cond_need_ext(ctx, d)) { 912 tcg_gen_ext32u_reg(tmp, tmp); 913 } 914 cond = cond_make_0_tmp(TCG_COND_EQ, tmp); 915 break; 916 case 6: /* SV / NSV (V / !V) */ 917 if (cond_need_ext(ctx, d)) { 918 tmp = tcg_temp_new(); 919 tcg_gen_ext32s_reg(tmp, sv); 920 sv = tmp; 921 } 922 cond = cond_make_0(TCG_COND_LT, sv); 923 break; 924 case 7: /* OD / EV */ 925 tmp = tcg_temp_new(); 926 tcg_gen_andi_reg(tmp, res, 1); 927 cond = cond_make_0_tmp(TCG_COND_NE, tmp); 928 break; 929 default: 930 g_assert_not_reached(); 931 } 932 if (cf & 1) { 933 cond.c = tcg_invert_cond(cond.c); 934 } 935 936 return cond; 937 } 938 939 /* Similar, but for the special case of subtraction without borrow, we 940 can use the inputs directly. This can allow other computation to be 941 deleted as unused. 
*/ 942 943 static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d, 944 TCGv_reg res, TCGv_reg in1, 945 TCGv_reg in2, TCGv_reg sv) 946 { 947 TCGCond tc; 948 bool ext_uns; 949 950 switch (cf >> 1) { 951 case 1: /* = / <> */ 952 tc = TCG_COND_EQ; 953 ext_uns = true; 954 break; 955 case 2: /* < / >= */ 956 tc = TCG_COND_LT; 957 ext_uns = false; 958 break; 959 case 3: /* <= / > */ 960 tc = TCG_COND_LE; 961 ext_uns = false; 962 break; 963 case 4: /* << / >>= */ 964 tc = TCG_COND_LTU; 965 ext_uns = true; 966 break; 967 case 5: /* <<= / >> */ 968 tc = TCG_COND_LEU; 969 ext_uns = true; 970 break; 971 default: 972 return do_cond(ctx, cf, d, res, NULL, sv); 973 } 974 975 if (cf & 1) { 976 tc = tcg_invert_cond(tc); 977 } 978 if (cond_need_ext(ctx, d)) { 979 TCGv_reg t1 = tcg_temp_new(); 980 TCGv_reg t2 = tcg_temp_new(); 981 982 if (ext_uns) { 983 tcg_gen_ext32u_reg(t1, in1); 984 tcg_gen_ext32u_reg(t2, in2); 985 } else { 986 tcg_gen_ext32s_reg(t1, in1); 987 tcg_gen_ext32s_reg(t2, in2); 988 } 989 return cond_make_tmp(tc, t1, t2); 990 } 991 return cond_make(tc, in1, in2); 992 } 993 994 /* 995 * Similar, but for logicals, where the carry and overflow bits are not 996 * computed, and use of them is undefined. 997 * 998 * Undefined or not, hardware does not trap. It seems reasonable to 999 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's 1000 * how cases c={2,3} are treated. 1001 */ 1002 1003 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d, 1004 TCGv_reg res) 1005 { 1006 TCGCond tc; 1007 bool ext_uns; 1008 1009 switch (cf) { 1010 case 0: /* never */ 1011 case 9: /* undef, C */ 1012 case 11: /* undef, C & !Z */ 1013 case 12: /* undef, V */ 1014 return cond_make_f(); 1015 1016 case 1: /* true */ 1017 case 8: /* undef, !C */ 1018 case 10: /* undef, !C | Z */ 1019 case 13: /* undef, !V */ 1020 return cond_make_t(); 1021 1022 case 2: /* == */ 1023 tc = TCG_COND_EQ; 1024 ext_uns = true; 1025 break; 1026 case 3: /* <> */ 1027 tc = TCG_COND_NE; 1028 ext_uns = true; 1029 break; 1030 case 4: /* < */ 1031 tc = TCG_COND_LT; 1032 ext_uns = false; 1033 break; 1034 case 5: /* >= */ 1035 tc = TCG_COND_GE; 1036 ext_uns = false; 1037 break; 1038 case 6: /* <= */ 1039 tc = TCG_COND_LE; 1040 ext_uns = false; 1041 break; 1042 case 7: /* > */ 1043 tc = TCG_COND_GT; 1044 ext_uns = false; 1045 break; 1046 1047 case 14: /* OD */ 1048 case 15: /* EV */ 1049 return do_cond(ctx, cf, d, res, NULL, NULL); 1050 1051 default: 1052 g_assert_not_reached(); 1053 } 1054 1055 if (cond_need_ext(ctx, d)) { 1056 TCGv_reg tmp = tcg_temp_new(); 1057 1058 if (ext_uns) { 1059 tcg_gen_ext32u_reg(tmp, res); 1060 } else { 1061 tcg_gen_ext32s_reg(tmp, res); 1062 } 1063 return cond_make_0_tmp(tc, tmp); 1064 } 1065 return cond_make_0(tc, res); 1066 } 1067 1068 /* Similar, but for shift/extract/deposit conditions. */ 1069 1070 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d, 1071 TCGv_reg res) 1072 { 1073 unsigned c, f; 1074 1075 /* Convert the compressed condition codes to standard. 1076 0-2 are the same as logicals (nv,<,<=), while 3 is OD. 1077 4-7 are the reverse of 0-3. */ 1078 c = orig & 3; 1079 if (c == 3) { 1080 c = 7; 1081 } 1082 f = (orig & 4) / 4; 1083 1084 return do_log_cond(ctx, c * 2 + f, d, res); 1085 } 1086 1087 /* Similar, but for unit conditions. */ 1088 1089 static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_reg res, 1090 TCGv_reg in1, TCGv_reg in2) 1091 { 1092 DisasCond cond; 1093 TCGv_reg tmp, cb = NULL; 1094 target_ureg d_repl = d ? 
0x0000000100000001ull : 1; 1095 1096 if (cf & 8) { 1097 /* Since we want to test lots of carry-out bits all at once, do not 1098 * do our normal thing and compute carry-in of bit B+1 since that 1099 * leaves us with carry bits spread across two words. 1100 */ 1101 cb = tcg_temp_new(); 1102 tmp = tcg_temp_new(); 1103 tcg_gen_or_reg(cb, in1, in2); 1104 tcg_gen_and_reg(tmp, in1, in2); 1105 tcg_gen_andc_reg(cb, cb, res); 1106 tcg_gen_or_reg(cb, cb, tmp); 1107 } 1108 1109 switch (cf >> 1) { 1110 case 0: /* never / TR */ 1111 case 1: /* undefined */ 1112 case 5: /* undefined */ 1113 cond = cond_make_f(); 1114 break; 1115 1116 case 2: /* SBZ / NBZ */ 1117 /* See hasless(v,1) from 1118 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord 1119 */ 1120 tmp = tcg_temp_new(); 1121 tcg_gen_subi_reg(tmp, res, d_repl * 0x01010101u); 1122 tcg_gen_andc_reg(tmp, tmp, res); 1123 tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80808080u); 1124 cond = cond_make_0(TCG_COND_NE, tmp); 1125 break; 1126 1127 case 3: /* SHZ / NHZ */ 1128 tmp = tcg_temp_new(); 1129 tcg_gen_subi_reg(tmp, res, d_repl * 0x00010001u); 1130 tcg_gen_andc_reg(tmp, tmp, res); 1131 tcg_gen_andi_reg(tmp, tmp, d_repl * 0x80008000u); 1132 cond = cond_make_0(TCG_COND_NE, tmp); 1133 break; 1134 1135 case 4: /* SDC / NDC */ 1136 tcg_gen_andi_reg(cb, cb, d_repl * 0x88888888u); 1137 cond = cond_make_0(TCG_COND_NE, cb); 1138 break; 1139 1140 case 6: /* SBC / NBC */ 1141 tcg_gen_andi_reg(cb, cb, d_repl * 0x80808080u); 1142 cond = cond_make_0(TCG_COND_NE, cb); 1143 break; 1144 1145 case 7: /* SHC / NHC */ 1146 tcg_gen_andi_reg(cb, cb, d_repl * 0x80008000u); 1147 cond = cond_make_0(TCG_COND_NE, cb); 1148 break; 1149 1150 default: 1151 g_assert_not_reached(); 1152 } 1153 if (cf & 1) { 1154 cond.c = tcg_invert_cond(cond.c); 1155 } 1156 1157 return cond; 1158 } 1159 1160 static TCGv_reg get_carry(DisasContext *ctx, bool d, 1161 TCGv_reg cb, TCGv_reg cb_msb) 1162 { 1163 if (cond_need_ext(ctx, d)) { 1164 TCGv_reg t = tcg_temp_new(); 1165 tcg_gen_extract_reg(t, cb, 32, 1); 1166 return t; 1167 } 1168 return cb_msb; 1169 } 1170 1171 static TCGv_reg get_psw_carry(DisasContext *ctx, bool d) 1172 { 1173 return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb); 1174 } 1175 1176 /* Compute signed overflow for addition. */ 1177 static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res, 1178 TCGv_reg in1, TCGv_reg in2) 1179 { 1180 TCGv_reg sv = tcg_temp_new(); 1181 TCGv_reg tmp = tcg_temp_new(); 1182 1183 tcg_gen_xor_reg(sv, res, in1); 1184 tcg_gen_xor_reg(tmp, in1, in2); 1185 tcg_gen_andc_reg(sv, sv, tmp); 1186 1187 return sv; 1188 } 1189 1190 /* Compute signed overflow for subtraction. 
*/ 1191 static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res, 1192 TCGv_reg in1, TCGv_reg in2) 1193 { 1194 TCGv_reg sv = tcg_temp_new(); 1195 TCGv_reg tmp = tcg_temp_new(); 1196 1197 tcg_gen_xor_reg(sv, res, in1); 1198 tcg_gen_xor_reg(tmp, in1, in2); 1199 tcg_gen_and_reg(sv, sv, tmp); 1200 1201 return sv; 1202 } 1203 1204 static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1205 TCGv_reg in2, unsigned shift, bool is_l, 1206 bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d) 1207 { 1208 TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp; 1209 unsigned c = cf >> 1; 1210 DisasCond cond; 1211 1212 dest = tcg_temp_new(); 1213 cb = NULL; 1214 cb_msb = NULL; 1215 cb_cond = NULL; 1216 1217 if (shift) { 1218 tmp = tcg_temp_new(); 1219 tcg_gen_shli_reg(tmp, in1, shift); 1220 in1 = tmp; 1221 } 1222 1223 if (!is_l || cond_need_cb(c)) { 1224 TCGv_reg zero = tcg_constant_reg(0); 1225 cb_msb = tcg_temp_new(); 1226 cb = tcg_temp_new(); 1227 1228 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero); 1229 if (is_c) { 1230 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, 1231 get_psw_carry(ctx, d), zero); 1232 } 1233 tcg_gen_xor_reg(cb, in1, in2); 1234 tcg_gen_xor_reg(cb, cb, dest); 1235 if (cond_need_cb(c)) { 1236 cb_cond = get_carry(ctx, d, cb, cb_msb); 1237 } 1238 } else { 1239 tcg_gen_add_reg(dest, in1, in2); 1240 if (is_c) { 1241 tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d)); 1242 } 1243 } 1244 1245 /* Compute signed overflow if required. */ 1246 sv = NULL; 1247 if (is_tsv || cond_need_sv(c)) { 1248 sv = do_add_sv(ctx, dest, in1, in2); 1249 if (is_tsv) { 1250 /* ??? Need to include overflow from shift. */ 1251 gen_helper_tsv(tcg_env, sv); 1252 } 1253 } 1254 1255 /* Emit any conditional trap before any writeback. */ 1256 cond = do_cond(ctx, cf, d, dest, cb_cond, sv); 1257 if (is_tc) { 1258 tmp = tcg_temp_new(); 1259 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); 1260 gen_helper_tcond(tcg_env, tmp); 1261 } 1262 1263 /* Write back the result. */ 1264 if (!is_l) { 1265 save_or_nullify(ctx, cpu_psw_cb, cb); 1266 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); 1267 } 1268 save_gpr(ctx, rt, dest); 1269 1270 /* Install the new nullification. */ 1271 cond_free(&ctx->null_cond); 1272 ctx->null_cond = cond; 1273 } 1274 1275 static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a, 1276 bool is_l, bool is_tsv, bool is_tc, bool is_c) 1277 { 1278 TCGv_reg tcg_r1, tcg_r2; 1279 1280 if (a->cf) { 1281 nullify_over(ctx); 1282 } 1283 tcg_r1 = load_gpr(ctx, a->r1); 1284 tcg_r2 = load_gpr(ctx, a->r2); 1285 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, 1286 is_tsv, is_tc, is_c, a->cf, a->d); 1287 return nullify_end(ctx); 1288 } 1289 1290 static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a, 1291 bool is_tsv, bool is_tc) 1292 { 1293 TCGv_reg tcg_im, tcg_r2; 1294 1295 if (a->cf) { 1296 nullify_over(ctx); 1297 } 1298 tcg_im = tcg_constant_reg(a->i); 1299 tcg_r2 = load_gpr(ctx, a->r); 1300 /* All ADDI conditions are 32-bit. */ 1301 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false); 1302 return nullify_end(ctx); 1303 } 1304 1305 static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1306 TCGv_reg in2, bool is_tsv, bool is_b, 1307 bool is_tc, unsigned cf, bool d) 1308 { 1309 TCGv_reg dest, sv, cb, cb_msb, zero, tmp; 1310 unsigned c = cf >> 1; 1311 DisasCond cond; 1312 1313 dest = tcg_temp_new(); 1314 cb = tcg_temp_new(); 1315 cb_msb = tcg_temp_new(); 1316 1317 zero = tcg_constant_reg(0); 1318 if (is_b) { 1319 /* DEST,C = IN1 + ~IN2 + C. 
*/ 1320 tcg_gen_not_reg(cb, in2); 1321 tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero); 1322 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero); 1323 tcg_gen_xor_reg(cb, cb, in1); 1324 tcg_gen_xor_reg(cb, cb, dest); 1325 } else { 1326 /* 1327 * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer 1328 * operations by seeding the high word with 1 and subtracting. 1329 */ 1330 TCGv_reg one = tcg_constant_reg(1); 1331 tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero); 1332 tcg_gen_eqv_reg(cb, in1, in2); 1333 tcg_gen_xor_reg(cb, cb, dest); 1334 } 1335 1336 /* Compute signed overflow if required. */ 1337 sv = NULL; 1338 if (is_tsv || cond_need_sv(c)) { 1339 sv = do_sub_sv(ctx, dest, in1, in2); 1340 if (is_tsv) { 1341 gen_helper_tsv(tcg_env, sv); 1342 } 1343 } 1344 1345 /* Compute the condition. We cannot use the special case for borrow. */ 1346 if (!is_b) { 1347 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv); 1348 } else { 1349 cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv); 1350 } 1351 1352 /* Emit any conditional trap before any writeback. */ 1353 if (is_tc) { 1354 tmp = tcg_temp_new(); 1355 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); 1356 gen_helper_tcond(tcg_env, tmp); 1357 } 1358 1359 /* Write back the result. */ 1360 save_or_nullify(ctx, cpu_psw_cb, cb); 1361 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb); 1362 save_gpr(ctx, rt, dest); 1363 1364 /* Install the new nullification. */ 1365 cond_free(&ctx->null_cond); 1366 ctx->null_cond = cond; 1367 } 1368 1369 static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a, 1370 bool is_tsv, bool is_b, bool is_tc) 1371 { 1372 TCGv_reg tcg_r1, tcg_r2; 1373 1374 if (a->cf) { 1375 nullify_over(ctx); 1376 } 1377 tcg_r1 = load_gpr(ctx, a->r1); 1378 tcg_r2 = load_gpr(ctx, a->r2); 1379 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d); 1380 return nullify_end(ctx); 1381 } 1382 1383 static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv) 1384 { 1385 TCGv_reg tcg_im, tcg_r2; 1386 1387 if (a->cf) { 1388 nullify_over(ctx); 1389 } 1390 tcg_im = tcg_constant_reg(a->i); 1391 tcg_r2 = load_gpr(ctx, a->r); 1392 /* All SUBI conditions are 32-bit. */ 1393 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false); 1394 return nullify_end(ctx); 1395 } 1396 1397 static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1398 TCGv_reg in2, unsigned cf, bool d) 1399 { 1400 TCGv_reg dest, sv; 1401 DisasCond cond; 1402 1403 dest = tcg_temp_new(); 1404 tcg_gen_sub_reg(dest, in1, in2); 1405 1406 /* Compute signed overflow if required. */ 1407 sv = NULL; 1408 if (cond_need_sv(cf >> 1)) { 1409 sv = do_sub_sv(ctx, dest, in1, in2); 1410 } 1411 1412 /* Form the condition for the compare. */ 1413 cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv); 1414 1415 /* Clear. */ 1416 tcg_gen_movi_reg(dest, 0); 1417 save_gpr(ctx, rt, dest); 1418 1419 /* Install the new nullification. */ 1420 cond_free(&ctx->null_cond); 1421 ctx->null_cond = cond; 1422 } 1423 1424 static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1425 TCGv_reg in2, unsigned cf, bool d, 1426 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) 1427 { 1428 TCGv_reg dest = dest_gpr(ctx, rt); 1429 1430 /* Perform the operation, and writeback. */ 1431 fn(dest, in1, in2); 1432 save_gpr(ctx, rt, dest); 1433 1434 /* Install the new nullification. 
*/ 1435 cond_free(&ctx->null_cond); 1436 if (cf) { 1437 ctx->null_cond = do_log_cond(ctx, cf, d, dest); 1438 } 1439 } 1440 1441 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a, 1442 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) 1443 { 1444 TCGv_reg tcg_r1, tcg_r2; 1445 1446 if (a->cf) { 1447 nullify_over(ctx); 1448 } 1449 tcg_r1 = load_gpr(ctx, a->r1); 1450 tcg_r2 = load_gpr(ctx, a->r2); 1451 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn); 1452 return nullify_end(ctx); 1453 } 1454 1455 static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1, 1456 TCGv_reg in2, unsigned cf, bool d, bool is_tc, 1457 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg)) 1458 { 1459 TCGv_reg dest; 1460 DisasCond cond; 1461 1462 if (cf == 0) { 1463 dest = dest_gpr(ctx, rt); 1464 fn(dest, in1, in2); 1465 save_gpr(ctx, rt, dest); 1466 cond_free(&ctx->null_cond); 1467 } else { 1468 dest = tcg_temp_new(); 1469 fn(dest, in1, in2); 1470 1471 cond = do_unit_cond(cf, d, dest, in1, in2); 1472 1473 if (is_tc) { 1474 TCGv_reg tmp = tcg_temp_new(); 1475 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1); 1476 gen_helper_tcond(tcg_env, tmp); 1477 } 1478 save_gpr(ctx, rt, dest); 1479 1480 cond_free(&ctx->null_cond); 1481 ctx->null_cond = cond; 1482 } 1483 } 1484 1485 #ifndef CONFIG_USER_ONLY 1486 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space 1487 from the top 2 bits of the base register. There are a few system 1488 instructions that have a 3-bit space specifier, for which SR0 is 1489 not special. To handle this, pass ~SP. */ 1490 static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base) 1491 { 1492 TCGv_ptr ptr; 1493 TCGv_reg tmp; 1494 TCGv_i64 spc; 1495 1496 if (sp != 0) { 1497 if (sp < 0) { 1498 sp = ~sp; 1499 } 1500 spc = tcg_temp_new_tl(); 1501 load_spr(ctx, spc, sp); 1502 return spc; 1503 } 1504 if (ctx->tb_flags & TB_FLAG_SR_SAME) { 1505 return cpu_srH; 1506 } 1507 1508 ptr = tcg_temp_new_ptr(); 1509 tmp = tcg_temp_new(); 1510 spc = tcg_temp_new_tl(); 1511 1512 /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */ 1513 tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5); 1514 tcg_gen_andi_reg(tmp, tmp, 030); 1515 tcg_gen_trunc_reg_ptr(ptr, tmp); 1516 1517 tcg_gen_add_ptr(ptr, ptr, tcg_env); 1518 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4])); 1519 1520 return spc; 1521 } 1522 #endif 1523 1524 static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs, 1525 unsigned rb, unsigned rx, int scale, target_sreg disp, 1526 unsigned sp, int modify, bool is_phys) 1527 { 1528 TCGv_reg base = load_gpr(ctx, rb); 1529 TCGv_reg ofs; 1530 TCGv_tl addr; 1531 1532 /* Note that RX is mutually exclusive with DISP. */ 1533 if (rx) { 1534 ofs = tcg_temp_new(); 1535 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale); 1536 tcg_gen_add_reg(ofs, ofs, base); 1537 } else if (disp || modify) { 1538 ofs = tcg_temp_new(); 1539 tcg_gen_addi_reg(ofs, base, disp); 1540 } else { 1541 ofs = base; 1542 } 1543 1544 *pofs = ofs; 1545 *pgva = addr = tcg_temp_new_tl(); 1546 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base); 1547 tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx)); 1548 #ifndef CONFIG_USER_ONLY 1549 if (!is_phys) { 1550 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base)); 1551 } 1552 #endif 1553 } 1554 1555 /* Emit a memory load. The modify parameter should be 1556 * < 0 for pre-modify, 1557 * > 0 for post-modify, 1558 * = 0 for no base register update. 
1559 */ 1560 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, 1561 unsigned rx, int scale, target_sreg disp, 1562 unsigned sp, int modify, MemOp mop) 1563 { 1564 TCGv_reg ofs; 1565 TCGv_tl addr; 1566 1567 /* Caller uses nullify_over/nullify_end. */ 1568 assert(ctx->null_cond.c == TCG_COND_NEVER); 1569 1570 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1571 ctx->mmu_idx == MMU_PHYS_IDX); 1572 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1573 if (modify) { 1574 save_gpr(ctx, rb, ofs); 1575 } 1576 } 1577 1578 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, 1579 unsigned rx, int scale, target_sreg disp, 1580 unsigned sp, int modify, MemOp mop) 1581 { 1582 TCGv_reg ofs; 1583 TCGv_tl addr; 1584 1585 /* Caller uses nullify_over/nullify_end. */ 1586 assert(ctx->null_cond.c == TCG_COND_NEVER); 1587 1588 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1589 ctx->mmu_idx == MMU_PHYS_IDX); 1590 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1591 if (modify) { 1592 save_gpr(ctx, rb, ofs); 1593 } 1594 } 1595 1596 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, 1597 unsigned rx, int scale, target_sreg disp, 1598 unsigned sp, int modify, MemOp mop) 1599 { 1600 TCGv_reg ofs; 1601 TCGv_tl addr; 1602 1603 /* Caller uses nullify_over/nullify_end. */ 1604 assert(ctx->null_cond.c == TCG_COND_NEVER); 1605 1606 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1607 ctx->mmu_idx == MMU_PHYS_IDX); 1608 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1609 if (modify) { 1610 save_gpr(ctx, rb, ofs); 1611 } 1612 } 1613 1614 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, 1615 unsigned rx, int scale, target_sreg disp, 1616 unsigned sp, int modify, MemOp mop) 1617 { 1618 TCGv_reg ofs; 1619 TCGv_tl addr; 1620 1621 /* Caller uses nullify_over/nullify_end. */ 1622 assert(ctx->null_cond.c == TCG_COND_NEVER); 1623 1624 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, 1625 ctx->mmu_idx == MMU_PHYS_IDX); 1626 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); 1627 if (modify) { 1628 save_gpr(ctx, rb, ofs); 1629 } 1630 } 1631 1632 #if TARGET_REGISTER_BITS == 64 1633 #define do_load_reg do_load_64 1634 #define do_store_reg do_store_64 1635 #else 1636 #define do_load_reg do_load_32 1637 #define do_store_reg do_store_32 1638 #endif 1639 1640 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb, 1641 unsigned rx, int scale, target_sreg disp, 1642 unsigned sp, int modify, MemOp mop) 1643 { 1644 TCGv_reg dest; 1645 1646 nullify_over(ctx); 1647 1648 if (modify == 0) { 1649 /* No base register update. */ 1650 dest = dest_gpr(ctx, rt); 1651 } else { 1652 /* Make sure if RT == RB, we see the result of the load. 
*/ 1653 dest = tcg_temp_new(); 1654 } 1655 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop); 1656 save_gpr(ctx, rt, dest); 1657 1658 return nullify_end(ctx); 1659 } 1660 1661 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb, 1662 unsigned rx, int scale, target_sreg disp, 1663 unsigned sp, int modify) 1664 { 1665 TCGv_i32 tmp; 1666 1667 nullify_over(ctx); 1668 1669 tmp = tcg_temp_new_i32(); 1670 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); 1671 save_frw_i32(rt, tmp); 1672 1673 if (rt == 0) { 1674 gen_helper_loaded_fr0(tcg_env); 1675 } 1676 1677 return nullify_end(ctx); 1678 } 1679 1680 static bool trans_fldw(DisasContext *ctx, arg_ldst *a) 1681 { 1682 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0, 1683 a->disp, a->sp, a->m); 1684 } 1685 1686 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb, 1687 unsigned rx, int scale, target_sreg disp, 1688 unsigned sp, int modify) 1689 { 1690 TCGv_i64 tmp; 1691 1692 nullify_over(ctx); 1693 1694 tmp = tcg_temp_new_i64(); 1695 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); 1696 save_frd(rt, tmp); 1697 1698 if (rt == 0) { 1699 gen_helper_loaded_fr0(tcg_env); 1700 } 1701 1702 return nullify_end(ctx); 1703 } 1704 1705 static bool trans_fldd(DisasContext *ctx, arg_ldst *a) 1706 { 1707 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0, 1708 a->disp, a->sp, a->m); 1709 } 1710 1711 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb, 1712 target_sreg disp, unsigned sp, 1713 int modify, MemOp mop) 1714 { 1715 nullify_over(ctx); 1716 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop); 1717 return nullify_end(ctx); 1718 } 1719 1720 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb, 1721 unsigned rx, int scale, target_sreg disp, 1722 unsigned sp, int modify) 1723 { 1724 TCGv_i32 tmp; 1725 1726 nullify_over(ctx); 1727 1728 tmp = load_frw_i32(rt); 1729 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); 1730 1731 return nullify_end(ctx); 1732 } 1733 1734 static bool trans_fstw(DisasContext *ctx, arg_ldst *a) 1735 { 1736 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0, 1737 a->disp, a->sp, a->m); 1738 } 1739 1740 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb, 1741 unsigned rx, int scale, target_sreg disp, 1742 unsigned sp, int modify) 1743 { 1744 TCGv_i64 tmp; 1745 1746 nullify_over(ctx); 1747 1748 tmp = load_frd(rt); 1749 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); 1750 1751 return nullify_end(ctx); 1752 } 1753 1754 static bool trans_fstd(DisasContext *ctx, arg_ldst *a) 1755 { 1756 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 
3 : 0, 1757 a->disp, a->sp, a->m); 1758 } 1759 1760 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra, 1761 void (*func)(TCGv_i32, TCGv_env, TCGv_i32)) 1762 { 1763 TCGv_i32 tmp; 1764 1765 nullify_over(ctx); 1766 tmp = load_frw0_i32(ra); 1767 1768 func(tmp, tcg_env, tmp); 1769 1770 save_frw_i32(rt, tmp); 1771 return nullify_end(ctx); 1772 } 1773 1774 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra, 1775 void (*func)(TCGv_i32, TCGv_env, TCGv_i64)) 1776 { 1777 TCGv_i32 dst; 1778 TCGv_i64 src; 1779 1780 nullify_over(ctx); 1781 src = load_frd(ra); 1782 dst = tcg_temp_new_i32(); 1783 1784 func(dst, tcg_env, src); 1785 1786 save_frw_i32(rt, dst); 1787 return nullify_end(ctx); 1788 } 1789 1790 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra, 1791 void (*func)(TCGv_i64, TCGv_env, TCGv_i64)) 1792 { 1793 TCGv_i64 tmp; 1794 1795 nullify_over(ctx); 1796 tmp = load_frd0(ra); 1797 1798 func(tmp, tcg_env, tmp); 1799 1800 save_frd(rt, tmp); 1801 return nullify_end(ctx); 1802 } 1803 1804 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra, 1805 void (*func)(TCGv_i64, TCGv_env, TCGv_i32)) 1806 { 1807 TCGv_i32 src; 1808 TCGv_i64 dst; 1809 1810 nullify_over(ctx); 1811 src = load_frw0_i32(ra); 1812 dst = tcg_temp_new_i64(); 1813 1814 func(dst, tcg_env, src); 1815 1816 save_frd(rt, dst); 1817 return nullify_end(ctx); 1818 } 1819 1820 static bool do_fop_weww(DisasContext *ctx, unsigned rt, 1821 unsigned ra, unsigned rb, 1822 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32)) 1823 { 1824 TCGv_i32 a, b; 1825 1826 nullify_over(ctx); 1827 a = load_frw0_i32(ra); 1828 b = load_frw0_i32(rb); 1829 1830 func(a, tcg_env, a, b); 1831 1832 save_frw_i32(rt, a); 1833 return nullify_end(ctx); 1834 } 1835 1836 static bool do_fop_dedd(DisasContext *ctx, unsigned rt, 1837 unsigned ra, unsigned rb, 1838 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64)) 1839 { 1840 TCGv_i64 a, b; 1841 1842 nullify_over(ctx); 1843 a = load_frd0(ra); 1844 b = load_frd0(rb); 1845 1846 func(a, tcg_env, a, b); 1847 1848 save_frd(rt, a); 1849 return nullify_end(ctx); 1850 } 1851 1852 /* Emit an unconditional branch to a direct target, which may or may not 1853 have already had nullification handled. */ 1854 static bool do_dbranch(DisasContext *ctx, target_ureg dest, 1855 unsigned link, bool is_n) 1856 { 1857 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) { 1858 if (link != 0) { 1859 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1860 } 1861 ctx->iaoq_n = dest; 1862 if (is_n) { 1863 ctx->null_cond.c = TCG_COND_ALWAYS; 1864 } 1865 } else { 1866 nullify_over(ctx); 1867 1868 if (link != 0) { 1869 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1870 } 1871 1872 if (is_n && use_nullify_skip(ctx)) { 1873 nullify_set(ctx, 0); 1874 gen_goto_tb(ctx, 0, dest, dest + 4); 1875 } else { 1876 nullify_set(ctx, is_n); 1877 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest); 1878 } 1879 1880 nullify_end(ctx); 1881 1882 nullify_set(ctx, 0); 1883 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n); 1884 ctx->base.is_jmp = DISAS_NORETURN; 1885 } 1886 return true; 1887 } 1888 1889 /* Emit a conditional branch to a direct target. If the branch itself 1890 is nullified, we should have already used nullify_over. 
*/ 1891 static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n, 1892 DisasCond *cond) 1893 { 1894 target_ureg dest = iaoq_dest(ctx, disp); 1895 TCGLabel *taken = NULL; 1896 TCGCond c = cond->c; 1897 bool n; 1898 1899 assert(ctx->null_cond.c == TCG_COND_NEVER); 1900 1901 /* Handle TRUE and NEVER as direct branches. */ 1902 if (c == TCG_COND_ALWAYS) { 1903 return do_dbranch(ctx, dest, 0, is_n && disp >= 0); 1904 } 1905 if (c == TCG_COND_NEVER) { 1906 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0); 1907 } 1908 1909 taken = gen_new_label(); 1910 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken); 1911 cond_free(cond); 1912 1913 /* Not taken: Condition not satisfied; nullify on backward branches. */ 1914 n = is_n && disp < 0; 1915 if (n && use_nullify_skip(ctx)) { 1916 nullify_set(ctx, 0); 1917 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4); 1918 } else { 1919 if (!n && ctx->null_lab) { 1920 gen_set_label(ctx->null_lab); 1921 ctx->null_lab = NULL; 1922 } 1923 nullify_set(ctx, n); 1924 if (ctx->iaoq_n == -1) { 1925 /* The temporary iaoq_n_var died at the branch above. 1926 Regenerate it here instead of saving it. */ 1927 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4); 1928 } 1929 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n); 1930 } 1931 1932 gen_set_label(taken); 1933 1934 /* Taken: Condition satisfied; nullify on forward branches. */ 1935 n = is_n && disp >= 0; 1936 if (n && use_nullify_skip(ctx)) { 1937 nullify_set(ctx, 0); 1938 gen_goto_tb(ctx, 1, dest, dest + 4); 1939 } else { 1940 nullify_set(ctx, n); 1941 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest); 1942 } 1943 1944 /* Not taken: the branch itself was nullified. */ 1945 if (ctx->null_lab) { 1946 gen_set_label(ctx->null_lab); 1947 ctx->null_lab = NULL; 1948 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 1949 } else { 1950 ctx->base.is_jmp = DISAS_NORETURN; 1951 } 1952 return true; 1953 } 1954 1955 /* Emit an unconditional branch to an indirect target. This handles 1956 nullification of the branch itself. */ 1957 static bool do_ibranch(DisasContext *ctx, TCGv_reg dest, 1958 unsigned link, bool is_n) 1959 { 1960 TCGv_reg a0, a1, next, tmp; 1961 TCGCond c; 1962 1963 assert(ctx->null_lab == NULL); 1964 1965 if (ctx->null_cond.c == TCG_COND_NEVER) { 1966 if (link != 0) { 1967 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 1968 } 1969 next = tcg_temp_new(); 1970 tcg_gen_mov_reg(next, dest); 1971 if (is_n) { 1972 if (use_nullify_skip(ctx)) { 1973 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next); 1974 tcg_gen_addi_reg(next, next, 4); 1975 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next); 1976 nullify_set(ctx, 0); 1977 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED; 1978 return true; 1979 } 1980 ctx->null_cond.c = TCG_COND_ALWAYS; 1981 } 1982 ctx->iaoq_n = -1; 1983 ctx->iaoq_n_var = next; 1984 } else if (is_n && use_nullify_skip(ctx)) { 1985 /* The (conditional) branch, B, nullifies the next insn, N, 1986 and we're allowed to skip execution N (no single-step or 1987 tracepoint in effect). Since the goto_ptr that we must use 1988 for the indirect branch consumes no special resources, we 1989 can (conditionally) skip B and continue execution. */ 1990 /* The use_nullify_skip test implies we have a known control path. */ 1991 tcg_debug_assert(ctx->iaoq_b != -1); 1992 tcg_debug_assert(ctx->iaoq_n != -1); 1993 1994 /* We do have to handle the non-local temporary, DEST, before 1995 branching. Since IOAQ_F is not really live at this point, we 1996 can simply store DEST optimistically. Similarly with IAOQ_B. 
*/ 1997 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest); 1998 next = tcg_temp_new(); 1999 tcg_gen_addi_reg(next, dest, 4); 2000 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next); 2001 2002 nullify_over(ctx); 2003 if (link != 0) { 2004 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var); 2005 } 2006 tcg_gen_lookup_and_goto_ptr(); 2007 return nullify_end(ctx); 2008 } else { 2009 c = ctx->null_cond.c; 2010 a0 = ctx->null_cond.a0; 2011 a1 = ctx->null_cond.a1; 2012 2013 tmp = tcg_temp_new(); 2014 next = tcg_temp_new(); 2015 2016 copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var); 2017 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest); 2018 ctx->iaoq_n = -1; 2019 ctx->iaoq_n_var = next; 2020 2021 if (link != 0) { 2022 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp); 2023 } 2024 2025 if (is_n) { 2026 /* The branch nullifies the next insn, which means the state of N 2027 after the branch is the inverse of the state of N that applied 2028 to the branch. */ 2029 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1); 2030 cond_free(&ctx->null_cond); 2031 ctx->null_cond = cond_make_n(); 2032 ctx->psw_n_nonzero = true; 2033 } else { 2034 cond_free(&ctx->null_cond); 2035 } 2036 } 2037 return true; 2038 } 2039 2040 /* Implement 2041 * if (IAOQ_Front{30..31} < GR[b]{30..31}) 2042 * IAOQ_Next{30..31} ← GR[b]{30..31}; 2043 * else 2044 * IAOQ_Next{30..31} ← IAOQ_Front{30..31}; 2045 * which keeps the privilege level from being increased. 2046 */ 2047 static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset) 2048 { 2049 TCGv_reg dest; 2050 switch (ctx->privilege) { 2051 case 0: 2052 /* Privilege 0 is maximum and is allowed to decrease. */ 2053 return offset; 2054 case 3: 2055 /* Privilege 3 is minimum and is never allowed to increase. */ 2056 dest = tcg_temp_new(); 2057 tcg_gen_ori_reg(dest, offset, 3); 2058 break; 2059 default: 2060 dest = tcg_temp_new(); 2061 tcg_gen_andi_reg(dest, offset, -4); 2062 tcg_gen_ori_reg(dest, dest, ctx->privilege); 2063 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset); 2064 break; 2065 } 2066 return dest; 2067 } 2068 2069 #ifdef CONFIG_USER_ONLY 2070 /* On Linux, page zero is normally marked execute only + gateway. 2071 Therefore normal read or write is supposed to fail, but specific 2072 offsets have kernel code mapped to raise permissions to implement 2073 system calls. Handling this via an explicit check here, rather 2074 in than the "be disp(sr2,r0)" instruction that probably sent us 2075 here, is the easiest way to handle the branch delay slot on the 2076 aforementioned BE. */ 2077 static void do_page_zero(DisasContext *ctx) 2078 { 2079 TCGv_reg tmp; 2080 2081 /* If by some means we get here with PSW[N]=1, that implies that 2082 the B,GATE instruction would be skipped, and we'd fault on the 2083 next insn within the privileged page. */ 2084 switch (ctx->null_cond.c) { 2085 case TCG_COND_NEVER: 2086 break; 2087 case TCG_COND_ALWAYS: 2088 tcg_gen_movi_reg(cpu_psw_n, 0); 2089 goto do_sigill; 2090 default: 2091 /* Since this is always the first (and only) insn within the 2092 TB, we should know the state of PSW[N] from TB->FLAGS. */ 2093 g_assert_not_reached(); 2094 } 2095 2096 /* Check that we didn't arrive here via some means that allowed 2097 non-sequential instruction execution. Normally the PSW[B] bit 2098 detects this by disallowing the B,GATE instruction to execute 2099 under such conditions. 
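Here we approximate that by requiring the back of the instruction queue to point at the next sequential instruction.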
*/ 2100 if (ctx->iaoq_b != ctx->iaoq_f + 4) { 2101 goto do_sigill; 2102 } 2103 2104 switch (ctx->iaoq_f & -4) { 2105 case 0x00: /* Null pointer call */ 2106 gen_excp_1(EXCP_IMP); 2107 ctx->base.is_jmp = DISAS_NORETURN; 2108 break; 2109 2110 case 0xb0: /* LWS */ 2111 gen_excp_1(EXCP_SYSCALL_LWS); 2112 ctx->base.is_jmp = DISAS_NORETURN; 2113 break; 2114 2115 case 0xe0: /* SET_THREAD_POINTER */ 2116 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27])); 2117 tmp = tcg_temp_new(); 2118 tcg_gen_ori_reg(tmp, cpu_gr[31], 3); 2119 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp); 2120 tcg_gen_addi_reg(tmp, tmp, 4); 2121 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); 2122 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED; 2123 break; 2124 2125 case 0x100: /* SYSCALL */ 2126 gen_excp_1(EXCP_SYSCALL); 2127 ctx->base.is_jmp = DISAS_NORETURN; 2128 break; 2129 2130 default: 2131 do_sigill: 2132 gen_excp_1(EXCP_ILL); 2133 ctx->base.is_jmp = DISAS_NORETURN; 2134 break; 2135 } 2136 } 2137 #endif 2138 2139 static bool trans_nop(DisasContext *ctx, arg_nop *a) 2140 { 2141 cond_free(&ctx->null_cond); 2142 return true; 2143 } 2144 2145 static bool trans_break(DisasContext *ctx, arg_break *a) 2146 { 2147 return gen_excp_iir(ctx, EXCP_BREAK); 2148 } 2149 2150 static bool trans_sync(DisasContext *ctx, arg_sync *a) 2151 { 2152 /* No point in nullifying the memory barrier. */ 2153 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); 2154 2155 cond_free(&ctx->null_cond); 2156 return true; 2157 } 2158 2159 static bool trans_mfia(DisasContext *ctx, arg_mfia *a) 2160 { 2161 unsigned rt = a->t; 2162 TCGv_reg tmp = dest_gpr(ctx, rt); 2163 tcg_gen_movi_reg(tmp, ctx->iaoq_f); 2164 save_gpr(ctx, rt, tmp); 2165 2166 cond_free(&ctx->null_cond); 2167 return true; 2168 } 2169 2170 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a) 2171 { 2172 unsigned rt = a->t; 2173 unsigned rs = a->sp; 2174 TCGv_i64 t0 = tcg_temp_new_i64(); 2175 TCGv_reg t1 = tcg_temp_new(); 2176 2177 load_spr(ctx, t0, rs); 2178 tcg_gen_shri_i64(t0, t0, 32); 2179 tcg_gen_trunc_i64_reg(t1, t0); 2180 2181 save_gpr(ctx, rt, t1); 2182 2183 cond_free(&ctx->null_cond); 2184 return true; 2185 } 2186 2187 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a) 2188 { 2189 unsigned rt = a->t; 2190 unsigned ctl = a->r; 2191 TCGv_reg tmp; 2192 2193 switch (ctl) { 2194 case CR_SAR: 2195 if (a->e == 0) { 2196 /* MFSAR without ,W masks low 5 bits. */ 2197 tmp = dest_gpr(ctx, rt); 2198 tcg_gen_andi_reg(tmp, cpu_sar, 31); 2199 save_gpr(ctx, rt, tmp); 2200 goto done; 2201 } 2202 save_gpr(ctx, rt, cpu_sar); 2203 goto done; 2204 case CR_IT: /* Interval Timer */ 2205 /* FIXME: Respect PSW_S bit. */ 2206 nullify_over(ctx); 2207 tmp = dest_gpr(ctx, rt); 2208 if (translator_io_start(&ctx->base)) { 2209 gen_helper_read_interval_timer(tmp); 2210 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2211 } else { 2212 gen_helper_read_interval_timer(tmp); 2213 } 2214 save_gpr(ctx, rt, tmp); 2215 return nullify_end(ctx); 2216 case 26: 2217 case 27: 2218 break; 2219 default: 2220 /* All other control registers are privileged. 
*/ 2221 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2222 break; 2223 } 2224 2225 tmp = tcg_temp_new(); 2226 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2227 save_gpr(ctx, rt, tmp); 2228 2229 done: 2230 cond_free(&ctx->null_cond); 2231 return true; 2232 } 2233 2234 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a) 2235 { 2236 unsigned rr = a->r; 2237 unsigned rs = a->sp; 2238 TCGv_i64 t64; 2239 2240 if (rs >= 5) { 2241 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2242 } 2243 nullify_over(ctx); 2244 2245 t64 = tcg_temp_new_i64(); 2246 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr)); 2247 tcg_gen_shli_i64(t64, t64, 32); 2248 2249 if (rs >= 4) { 2250 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs])); 2251 ctx->tb_flags &= ~TB_FLAG_SR_SAME; 2252 } else { 2253 tcg_gen_mov_i64(cpu_sr[rs], t64); 2254 } 2255 2256 return nullify_end(ctx); 2257 } 2258 2259 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a) 2260 { 2261 unsigned ctl = a->t; 2262 TCGv_reg reg; 2263 TCGv_reg tmp; 2264 2265 if (ctl == CR_SAR) { 2266 reg = load_gpr(ctx, a->r); 2267 tmp = tcg_temp_new(); 2268 tcg_gen_andi_reg(tmp, reg, ctx->is_pa20 ? 63 : 31); 2269 save_or_nullify(ctx, cpu_sar, tmp); 2270 2271 cond_free(&ctx->null_cond); 2272 return true; 2273 } 2274 2275 /* All other control registers are privileged or read-only. */ 2276 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG); 2277 2278 #ifndef CONFIG_USER_ONLY 2279 nullify_over(ctx); 2280 reg = load_gpr(ctx, a->r); 2281 2282 switch (ctl) { 2283 case CR_IT: 2284 gen_helper_write_interval_timer(tcg_env, reg); 2285 break; 2286 case CR_EIRR: 2287 gen_helper_write_eirr(tcg_env, reg); 2288 break; 2289 case CR_EIEM: 2290 gen_helper_write_eiem(tcg_env, reg); 2291 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2292 break; 2293 2294 case CR_IIASQ: 2295 case CR_IIAOQ: 2296 /* FIXME: Respect PSW_Q bit */ 2297 /* The write advances the queue and stores to the back element. */ 2298 tmp = tcg_temp_new(); 2299 tcg_gen_ld_reg(tmp, tcg_env, 2300 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2301 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2302 tcg_gen_st_reg(reg, tcg_env, 2303 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ])); 2304 break; 2305 2306 case CR_PID1: 2307 case CR_PID2: 2308 case CR_PID3: 2309 case CR_PID4: 2310 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2311 #ifndef CONFIG_USER_ONLY 2312 gen_helper_change_prot_id(tcg_env); 2313 #endif 2314 break; 2315 2316 default: 2317 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl])); 2318 break; 2319 } 2320 return nullify_end(ctx); 2321 #endif 2322 } 2323 2324 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a) 2325 { 2326 TCGv_reg tmp = tcg_temp_new(); 2327 2328 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r)); 2329 tcg_gen_andi_reg(tmp, tmp, ctx->is_pa20 ? 63 : 31); 2330 save_or_nullify(ctx, cpu_sar, tmp); 2331 2332 cond_free(&ctx->null_cond); 2333 return true; 2334 } 2335 2336 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a) 2337 { 2338 TCGv_reg dest = dest_gpr(ctx, a->t); 2339 2340 #ifdef CONFIG_USER_ONLY 2341 /* We don't implement space registers in user mode. 
*/ 2342 tcg_gen_movi_reg(dest, 0); 2343 #else 2344 TCGv_i64 t0 = tcg_temp_new_i64(); 2345 2346 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b))); 2347 tcg_gen_shri_i64(t0, t0, 32); 2348 tcg_gen_trunc_i64_reg(dest, t0); 2349 #endif 2350 save_gpr(ctx, a->t, dest); 2351 2352 cond_free(&ctx->null_cond); 2353 return true; 2354 } 2355 2356 static bool trans_rsm(DisasContext *ctx, arg_rsm *a) 2357 { 2358 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2359 #ifndef CONFIG_USER_ONLY 2360 TCGv_reg tmp; 2361 2362 nullify_over(ctx); 2363 2364 tmp = tcg_temp_new(); 2365 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw)); 2366 tcg_gen_andi_reg(tmp, tmp, ~a->i); 2367 gen_helper_swap_system_mask(tmp, tcg_env, tmp); 2368 save_gpr(ctx, a->t, tmp); 2369 2370 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */ 2371 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2372 return nullify_end(ctx); 2373 #endif 2374 } 2375 2376 static bool trans_ssm(DisasContext *ctx, arg_ssm *a) 2377 { 2378 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2379 #ifndef CONFIG_USER_ONLY 2380 TCGv_reg tmp; 2381 2382 nullify_over(ctx); 2383 2384 tmp = tcg_temp_new(); 2385 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw)); 2386 tcg_gen_ori_reg(tmp, tmp, a->i); 2387 gen_helper_swap_system_mask(tmp, tcg_env, tmp); 2388 save_gpr(ctx, a->t, tmp); 2389 2390 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */ 2391 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2392 return nullify_end(ctx); 2393 #endif 2394 } 2395 2396 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a) 2397 { 2398 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2399 #ifndef CONFIG_USER_ONLY 2400 TCGv_reg tmp, reg; 2401 nullify_over(ctx); 2402 2403 reg = load_gpr(ctx, a->r); 2404 tmp = tcg_temp_new(); 2405 gen_helper_swap_system_mask(tmp, tcg_env, reg); 2406 2407 /* Exit the TB to recognize new interrupts. */ 2408 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT; 2409 return nullify_end(ctx); 2410 #endif 2411 } 2412 2413 static bool do_rfi(DisasContext *ctx, bool rfi_r) 2414 { 2415 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2416 #ifndef CONFIG_USER_ONLY 2417 nullify_over(ctx); 2418 2419 if (rfi_r) { 2420 gen_helper_rfi_r(tcg_env); 2421 } else { 2422 gen_helper_rfi(tcg_env); 2423 } 2424 /* Exit the TB to recognize new interrupts. 
*/ 2425 tcg_gen_exit_tb(NULL, 0); 2426 ctx->base.is_jmp = DISAS_NORETURN; 2427 2428 return nullify_end(ctx); 2429 #endif 2430 } 2431 2432 static bool trans_rfi(DisasContext *ctx, arg_rfi *a) 2433 { 2434 return do_rfi(ctx, false); 2435 } 2436 2437 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a) 2438 { 2439 return do_rfi(ctx, true); 2440 } 2441 2442 static bool trans_halt(DisasContext *ctx, arg_halt *a) 2443 { 2444 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2445 #ifndef CONFIG_USER_ONLY 2446 nullify_over(ctx); 2447 gen_helper_halt(tcg_env); 2448 ctx->base.is_jmp = DISAS_NORETURN; 2449 return nullify_end(ctx); 2450 #endif 2451 } 2452 2453 static bool trans_reset(DisasContext *ctx, arg_reset *a) 2454 { 2455 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2456 #ifndef CONFIG_USER_ONLY 2457 nullify_over(ctx); 2458 gen_helper_reset(tcg_env); 2459 ctx->base.is_jmp = DISAS_NORETURN; 2460 return nullify_end(ctx); 2461 #endif 2462 } 2463 2464 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a) 2465 { 2466 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2467 #ifndef CONFIG_USER_ONLY 2468 nullify_over(ctx); 2469 gen_helper_getshadowregs(tcg_env); 2470 return nullify_end(ctx); 2471 #endif 2472 } 2473 2474 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a) 2475 { 2476 if (a->m) { 2477 TCGv_reg dest = dest_gpr(ctx, a->b); 2478 TCGv_reg src1 = load_gpr(ctx, a->b); 2479 TCGv_reg src2 = load_gpr(ctx, a->x); 2480 2481 /* The only thing we need to do is the base register modification. */ 2482 tcg_gen_add_reg(dest, src1, src2); 2483 save_gpr(ctx, a->b, dest); 2484 } 2485 cond_free(&ctx->null_cond); 2486 return true; 2487 } 2488 2489 static bool trans_probe(DisasContext *ctx, arg_probe *a) 2490 { 2491 TCGv_reg dest, ofs; 2492 TCGv_i32 level, want; 2493 TCGv_tl addr; 2494 2495 nullify_over(ctx); 2496 2497 dest = dest_gpr(ctx, a->t); 2498 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2499 2500 if (a->imm) { 2501 level = tcg_constant_i32(a->ri); 2502 } else { 2503 level = tcg_temp_new_i32(); 2504 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri)); 2505 tcg_gen_andi_i32(level, level, 3); 2506 } 2507 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ); 2508 2509 gen_helper_probe(dest, tcg_env, addr, level, want); 2510 2511 save_gpr(ctx, a->t, dest); 2512 return nullify_end(ctx); 2513 } 2514 2515 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a) 2516 { 2517 if (ctx->is_pa20) { 2518 return false; 2519 } 2520 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2521 #ifndef CONFIG_USER_ONLY 2522 TCGv_tl addr; 2523 TCGv_reg ofs, reg; 2524 2525 nullify_over(ctx); 2526 2527 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false); 2528 reg = load_gpr(ctx, a->r); 2529 if (a->addr) { 2530 gen_helper_itlba_pa11(tcg_env, addr, reg); 2531 } else { 2532 gen_helper_itlbp_pa11(tcg_env, addr, reg); 2533 } 2534 2535 /* Exit TB for TLB change if mmu is enabled. */ 2536 if (ctx->tb_flags & PSW_C) { 2537 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2538 } 2539 return nullify_end(ctx); 2540 #endif 2541 } 2542 2543 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a) 2544 { 2545 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2546 #ifndef CONFIG_USER_ONLY 2547 TCGv_tl addr; 2548 TCGv_reg ofs; 2549 2550 nullify_over(ctx); 2551 2552 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2553 if (a->m) { 2554 save_gpr(ctx, a->b, ofs); 2555 } 2556 if (a->local) { 2557 gen_helper_ptlbe(tcg_env); 2558 } else { 2559 gen_helper_ptlb(tcg_env, addr); 2560 } 2561 2562 /* Exit TB for TLB change if mmu is enabled. 
*/ 2563 if (ctx->tb_flags & PSW_C) { 2564 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2565 } 2566 return nullify_end(ctx); 2567 #endif 2568 } 2569 2570 /* 2571 * Implement the pcxl and pcxl2 Fast TLB Insert instructions. 2572 * See 2573 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf 2574 * page 13-9 (195/206) 2575 */ 2576 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a) 2577 { 2578 if (ctx->is_pa20) { 2579 return false; 2580 } 2581 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2582 #ifndef CONFIG_USER_ONLY 2583 TCGv_tl addr, atl, stl; 2584 TCGv_reg reg; 2585 2586 nullify_over(ctx); 2587 2588 /* 2589 * FIXME: 2590 * if (not (pcxl or pcxl2)) 2591 * return gen_illegal(ctx); 2592 */ 2593 2594 atl = tcg_temp_new_tl(); 2595 stl = tcg_temp_new_tl(); 2596 addr = tcg_temp_new_tl(); 2597 2598 tcg_gen_ld32u_i64(stl, tcg_env, 2599 a->data ? offsetof(CPUHPPAState, cr[CR_ISR]) 2600 : offsetof(CPUHPPAState, cr[CR_IIASQ])); 2601 tcg_gen_ld32u_i64(atl, tcg_env, 2602 a->data ? offsetof(CPUHPPAState, cr[CR_IOR]) 2603 : offsetof(CPUHPPAState, cr[CR_IIAOQ])); 2604 tcg_gen_shli_i64(stl, stl, 32); 2605 tcg_gen_or_tl(addr, atl, stl); 2606 2607 reg = load_gpr(ctx, a->r); 2608 if (a->addr) { 2609 gen_helper_itlba_pa11(tcg_env, addr, reg); 2610 } else { 2611 gen_helper_itlbp_pa11(tcg_env, addr, reg); 2612 } 2613 2614 /* Exit TB for TLB change if mmu is enabled. */ 2615 if (ctx->tb_flags & PSW_C) { 2616 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2617 } 2618 return nullify_end(ctx); 2619 #endif 2620 } 2621 2622 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a) 2623 { 2624 if (!ctx->is_pa20) { 2625 return false; 2626 } 2627 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2628 #ifndef CONFIG_USER_ONLY 2629 nullify_over(ctx); 2630 { 2631 TCGv_i64 src1 = load_gpr(ctx, a->r1); 2632 TCGv_i64 src2 = load_gpr(ctx, a->r2); 2633 2634 if (a->data) { 2635 gen_helper_idtlbt_pa20(tcg_env, src1, src2); 2636 } else { 2637 gen_helper_iitlbt_pa20(tcg_env, src1, src2); 2638 } 2639 } 2640 /* Exit TB for TLB change if mmu is enabled. */ 2641 if (ctx->tb_flags & PSW_C) { 2642 ctx->base.is_jmp = DISAS_IAQ_N_STALE; 2643 } 2644 return nullify_end(ctx); 2645 #endif 2646 } 2647 2648 static bool trans_lpa(DisasContext *ctx, arg_ldst *a) 2649 { 2650 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2651 #ifndef CONFIG_USER_ONLY 2652 TCGv_tl vaddr; 2653 TCGv_reg ofs, paddr; 2654 2655 nullify_over(ctx); 2656 2657 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false); 2658 2659 paddr = tcg_temp_new(); 2660 gen_helper_lpa(paddr, tcg_env, vaddr); 2661 2662 /* Note that physical address result overrides base modification. */ 2663 if (a->m) { 2664 save_gpr(ctx, a->b, ofs); 2665 } 2666 save_gpr(ctx, a->t, paddr); 2667 2668 return nullify_end(ctx); 2669 #endif 2670 } 2671 2672 static bool trans_lci(DisasContext *ctx, arg_lci *a) 2673 { 2674 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 2675 2676 /* The Coherence Index is an implementation-defined function of the 2677 physical address. Two addresses with the same CI have a coherent 2678 view of the cache. Our implementation is to return 0 for all, 2679 since the entire address space is coherent. 
*/ 2680 save_gpr(ctx, a->t, tcg_constant_reg(0)); 2681 2682 cond_free(&ctx->null_cond); 2683 return true; 2684 } 2685 2686 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2687 { 2688 return do_add_reg(ctx, a, false, false, false, false); 2689 } 2690 2691 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2692 { 2693 return do_add_reg(ctx, a, true, false, false, false); 2694 } 2695 2696 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2697 { 2698 return do_add_reg(ctx, a, false, true, false, false); 2699 } 2700 2701 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2702 { 2703 return do_add_reg(ctx, a, false, false, false, true); 2704 } 2705 2706 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a) 2707 { 2708 return do_add_reg(ctx, a, false, true, false, true); 2709 } 2710 2711 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a) 2712 { 2713 return do_sub_reg(ctx, a, false, false, false); 2714 } 2715 2716 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a) 2717 { 2718 return do_sub_reg(ctx, a, true, false, false); 2719 } 2720 2721 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a) 2722 { 2723 return do_sub_reg(ctx, a, false, false, true); 2724 } 2725 2726 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a) 2727 { 2728 return do_sub_reg(ctx, a, true, false, true); 2729 } 2730 2731 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a) 2732 { 2733 return do_sub_reg(ctx, a, false, true, false); 2734 } 2735 2736 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a) 2737 { 2738 return do_sub_reg(ctx, a, true, true, false); 2739 } 2740 2741 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a) 2742 { 2743 return do_log_reg(ctx, a, tcg_gen_andc_reg); 2744 } 2745 2746 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a) 2747 { 2748 return do_log_reg(ctx, a, tcg_gen_and_reg); 2749 } 2750 2751 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a) 2752 { 2753 if (a->cf == 0) { 2754 unsigned r2 = a->r2; 2755 unsigned r1 = a->r1; 2756 unsigned rt = a->t; 2757 2758 if (rt == 0) { /* NOP */ 2759 cond_free(&ctx->null_cond); 2760 return true; 2761 } 2762 if (r2 == 0) { /* COPY */ 2763 if (r1 == 0) { 2764 TCGv_reg dest = dest_gpr(ctx, rt); 2765 tcg_gen_movi_reg(dest, 0); 2766 save_gpr(ctx, rt, dest); 2767 } else { 2768 save_gpr(ctx, rt, cpu_gr[r1]); 2769 } 2770 cond_free(&ctx->null_cond); 2771 return true; 2772 } 2773 #ifndef CONFIG_USER_ONLY 2774 /* These are QEMU extensions and are nops in the real architecture: 2775 * 2776 * or %r10,%r10,%r10 -- idle loop; wait for interrupt 2777 * or %r31,%r31,%r31 -- death loop; offline cpu 2778 * currently implemented as idle. 2779 */ 2780 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */ 2781 /* No need to check for supervisor, as userland can only pause 2782 until the next timer interrupt. */ 2783 nullify_over(ctx); 2784 2785 /* Advance the instruction queue. */ 2786 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 2787 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var); 2788 nullify_set(ctx, 0); 2789 2790 /* Tell the qemu main loop to halt until this cpu has work. 
*/ 2791 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, 2792 offsetof(CPUState, halted) - offsetof(HPPACPU, env)); 2793 gen_excp_1(EXCP_HALTED); 2794 ctx->base.is_jmp = DISAS_NORETURN; 2795 2796 return nullify_end(ctx); 2797 } 2798 #endif 2799 } 2800 return do_log_reg(ctx, a, tcg_gen_or_reg); 2801 } 2802 2803 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a) 2804 { 2805 return do_log_reg(ctx, a, tcg_gen_xor_reg); 2806 } 2807 2808 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a) 2809 { 2810 TCGv_reg tcg_r1, tcg_r2; 2811 2812 if (a->cf) { 2813 nullify_over(ctx); 2814 } 2815 tcg_r1 = load_gpr(ctx, a->r1); 2816 tcg_r2 = load_gpr(ctx, a->r2); 2817 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d); 2818 return nullify_end(ctx); 2819 } 2820 2821 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a) 2822 { 2823 TCGv_reg tcg_r1, tcg_r2; 2824 2825 if (a->cf) { 2826 nullify_over(ctx); 2827 } 2828 tcg_r1 = load_gpr(ctx, a->r1); 2829 tcg_r2 = load_gpr(ctx, a->r2); 2830 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_reg); 2831 return nullify_end(ctx); 2832 } 2833 2834 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc) 2835 { 2836 TCGv_reg tcg_r1, tcg_r2, tmp; 2837 2838 if (a->cf) { 2839 nullify_over(ctx); 2840 } 2841 tcg_r1 = load_gpr(ctx, a->r1); 2842 tcg_r2 = load_gpr(ctx, a->r2); 2843 tmp = tcg_temp_new(); 2844 tcg_gen_not_reg(tmp, tcg_r2); 2845 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_reg); 2846 return nullify_end(ctx); 2847 } 2848 2849 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a) 2850 { 2851 return do_uaddcm(ctx, a, false); 2852 } 2853 2854 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a) 2855 { 2856 return do_uaddcm(ctx, a, true); 2857 } 2858 2859 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i) 2860 { 2861 TCGv_reg tmp; 2862 2863 nullify_over(ctx); 2864 2865 tmp = tcg_temp_new(); 2866 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3); 2867 if (!is_i) { 2868 tcg_gen_not_reg(tmp, tmp); 2869 } 2870 tcg_gen_andi_reg(tmp, tmp, (target_ureg)0x1111111111111111ull); 2871 tcg_gen_muli_reg(tmp, tmp, 6); 2872 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false, 2873 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg); 2874 return nullify_end(ctx); 2875 } 2876 2877 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a) 2878 { 2879 return do_dcor(ctx, a, false); 2880 } 2881 2882 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a) 2883 { 2884 return do_dcor(ctx, a, true); 2885 } 2886 2887 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a) 2888 { 2889 TCGv_reg dest, add1, add2, addc, zero, in1, in2; 2890 TCGv_reg cout; 2891 2892 nullify_over(ctx); 2893 2894 in1 = load_gpr(ctx, a->r1); 2895 in2 = load_gpr(ctx, a->r2); 2896 2897 add1 = tcg_temp_new(); 2898 add2 = tcg_temp_new(); 2899 addc = tcg_temp_new(); 2900 dest = tcg_temp_new(); 2901 zero = tcg_constant_reg(0); 2902 2903 /* Form R1 << 1 | PSW[CB]{8}. */ 2904 tcg_gen_add_reg(add1, in1, in1); 2905 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false)); 2906 2907 /* 2908 * Add or subtract R2, depending on PSW[V]. Proper computation of 2909 * carry requires that we subtract via + ~R2 + 1, as described in 2910 * the manual. By extracting and masking V, we can produce the 2911 * proper inputs to the addition without movcond. 
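* Sign-extending bit 31 of PSW[V] gives 0 or -1: xor with R2 then yields R2 or ~R2, and masking that same value to 1 supplies the +1 needed for the subtraction.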
2912 */ 2913 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1); 2914 tcg_gen_xor_reg(add2, in2, addc); 2915 tcg_gen_andi_reg(addc, addc, 1); 2916 2917 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero); 2918 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero); 2919 2920 /* Write back the result register. */ 2921 save_gpr(ctx, a->t, dest); 2922 2923 /* Write back PSW[CB]. */ 2924 tcg_gen_xor_reg(cpu_psw_cb, add1, add2); 2925 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest); 2926 2927 /* Write back PSW[V] for the division step. */ 2928 cout = get_psw_carry(ctx, false); 2929 tcg_gen_neg_reg(cpu_psw_v, cout); 2930 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2); 2931 2932 /* Install the new nullification. */ 2933 if (a->cf) { 2934 TCGv_reg sv = NULL; 2935 if (cond_need_sv(a->cf >> 1)) { 2936 /* ??? The lshift is supposed to contribute to overflow. */ 2937 sv = do_add_sv(ctx, dest, add1, add2); 2938 } 2939 ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv); 2940 } 2941 2942 return nullify_end(ctx); 2943 } 2944 2945 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a) 2946 { 2947 return do_add_imm(ctx, a, false, false); 2948 } 2949 2950 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a) 2951 { 2952 return do_add_imm(ctx, a, true, false); 2953 } 2954 2955 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a) 2956 { 2957 return do_add_imm(ctx, a, false, true); 2958 } 2959 2960 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a) 2961 { 2962 return do_add_imm(ctx, a, true, true); 2963 } 2964 2965 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a) 2966 { 2967 return do_sub_imm(ctx, a, false); 2968 } 2969 2970 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a) 2971 { 2972 return do_sub_imm(ctx, a, true); 2973 } 2974 2975 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a) 2976 { 2977 TCGv_reg tcg_im, tcg_r2; 2978 2979 if (a->cf) { 2980 nullify_over(ctx); 2981 } 2982 2983 tcg_im = tcg_constant_reg(a->i); 2984 tcg_r2 = load_gpr(ctx, a->r); 2985 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d); 2986 2987 return nullify_end(ctx); 2988 } 2989 2990 static bool trans_ld(DisasContext *ctx, arg_ldst *a) 2991 { 2992 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) { 2993 return gen_illegal(ctx); 2994 } else { 2995 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0, 2996 a->disp, a->sp, a->m, a->size | MO_TE); 2997 } 2998 } 2999 3000 static bool trans_st(DisasContext *ctx, arg_ldst *a) 3001 { 3002 assert(a->x == 0 && a->scale == 0); 3003 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) { 3004 return gen_illegal(ctx); 3005 } else { 3006 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE); 3007 } 3008 } 3009 3010 static bool trans_ldc(DisasContext *ctx, arg_ldst *a) 3011 { 3012 MemOp mop = MO_TE | MO_ALIGN | a->size; 3013 TCGv_reg zero, dest, ofs; 3014 TCGv_tl addr; 3015 3016 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) { 3017 return gen_illegal(ctx); 3018 } 3019 3020 nullify_over(ctx); 3021 3022 if (a->m) { 3023 /* Base register modification. Make sure if RT == RB, 3024 we see the result of the load. */ 3025 dest = tcg_temp_new(); 3026 } else { 3027 dest = dest_gpr(ctx, a->t); 3028 } 3029 3030 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0, 3031 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX); 3032 3033 /* 3034 * For hppa1.1, LDCW is undefined unless aligned mod 16. 3035 * However actual hardware succeeds with aligned mod 4. 
3036 * Detect this case and log a GUEST_ERROR. 3037 * 3038 * TODO: HPPA64 relaxes the over-alignment requirement 3039 * with the ,co completer. 3040 */ 3041 gen_helper_ldc_check(addr); 3042 3043 zero = tcg_constant_reg(0); 3044 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop); 3045 3046 if (a->m) { 3047 save_gpr(ctx, a->b, ofs); 3048 } 3049 save_gpr(ctx, a->t, dest); 3050 3051 return nullify_end(ctx); 3052 } 3053 3054 static bool trans_stby(DisasContext *ctx, arg_stby *a) 3055 { 3056 TCGv_reg ofs, val; 3057 TCGv_tl addr; 3058 3059 nullify_over(ctx); 3060 3061 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 3062 ctx->mmu_idx == MMU_PHYS_IDX); 3063 val = load_gpr(ctx, a->r); 3064 if (a->a) { 3065 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3066 gen_helper_stby_e_parallel(tcg_env, addr, val); 3067 } else { 3068 gen_helper_stby_e(tcg_env, addr, val); 3069 } 3070 } else { 3071 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3072 gen_helper_stby_b_parallel(tcg_env, addr, val); 3073 } else { 3074 gen_helper_stby_b(tcg_env, addr, val); 3075 } 3076 } 3077 if (a->m) { 3078 tcg_gen_andi_reg(ofs, ofs, ~3); 3079 save_gpr(ctx, a->b, ofs); 3080 } 3081 3082 return nullify_end(ctx); 3083 } 3084 3085 static bool trans_stdby(DisasContext *ctx, arg_stby *a) 3086 { 3087 TCGv_reg ofs, val; 3088 TCGv_tl addr; 3089 3090 if (!ctx->is_pa20) { 3091 return false; 3092 } 3093 nullify_over(ctx); 3094 3095 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m, 3096 ctx->mmu_idx == MMU_PHYS_IDX); 3097 val = load_gpr(ctx, a->r); 3098 if (a->a) { 3099 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3100 gen_helper_stdby_e_parallel(tcg_env, addr, val); 3101 } else { 3102 gen_helper_stdby_e(tcg_env, addr, val); 3103 } 3104 } else { 3105 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { 3106 gen_helper_stdby_b_parallel(tcg_env, addr, val); 3107 } else { 3108 gen_helper_stdby_b(tcg_env, addr, val); 3109 } 3110 } 3111 if (a->m) { 3112 tcg_gen_andi_reg(ofs, ofs, ~7); 3113 save_gpr(ctx, a->b, ofs); 3114 } 3115 3116 return nullify_end(ctx); 3117 } 3118 3119 static bool trans_lda(DisasContext *ctx, arg_ldst *a) 3120 { 3121 int hold_mmu_idx = ctx->mmu_idx; 3122 3123 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 3124 ctx->mmu_idx = MMU_PHYS_IDX; 3125 trans_ld(ctx, a); 3126 ctx->mmu_idx = hold_mmu_idx; 3127 return true; 3128 } 3129 3130 static bool trans_sta(DisasContext *ctx, arg_ldst *a) 3131 { 3132 int hold_mmu_idx = ctx->mmu_idx; 3133 3134 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR); 3135 ctx->mmu_idx = MMU_PHYS_IDX; 3136 trans_st(ctx, a); 3137 ctx->mmu_idx = hold_mmu_idx; 3138 return true; 3139 } 3140 3141 static bool trans_ldil(DisasContext *ctx, arg_ldil *a) 3142 { 3143 TCGv_reg tcg_rt = dest_gpr(ctx, a->t); 3144 3145 tcg_gen_movi_reg(tcg_rt, a->i); 3146 save_gpr(ctx, a->t, tcg_rt); 3147 cond_free(&ctx->null_cond); 3148 return true; 3149 } 3150 3151 static bool trans_addil(DisasContext *ctx, arg_addil *a) 3152 { 3153 TCGv_reg tcg_rt = load_gpr(ctx, a->r); 3154 TCGv_reg tcg_r1 = dest_gpr(ctx, 1); 3155 3156 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i); 3157 save_gpr(ctx, 1, tcg_r1); 3158 cond_free(&ctx->null_cond); 3159 return true; 3160 } 3161 3162 static bool trans_ldo(DisasContext *ctx, arg_ldo *a) 3163 { 3164 TCGv_reg tcg_rt = dest_gpr(ctx, a->t); 3165 3166 /* Special case rb == 0, for the LDI pseudo-op. 3167 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. 
*/ 3168 if (a->b == 0) { 3169 tcg_gen_movi_reg(tcg_rt, a->i); 3170 } else { 3171 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i); 3172 } 3173 save_gpr(ctx, a->t, tcg_rt); 3174 cond_free(&ctx->null_cond); 3175 return true; 3176 } 3177 3178 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1, 3179 unsigned c, unsigned f, bool d, unsigned n, int disp) 3180 { 3181 TCGv_reg dest, in2, sv; 3182 DisasCond cond; 3183 3184 in2 = load_gpr(ctx, r); 3185 dest = tcg_temp_new(); 3186 3187 tcg_gen_sub_reg(dest, in1, in2); 3188 3189 sv = NULL; 3190 if (cond_need_sv(c)) { 3191 sv = do_sub_sv(ctx, dest, in1, in2); 3192 } 3193 3194 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv); 3195 return do_cbranch(ctx, disp, n, &cond); 3196 } 3197 3198 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a) 3199 { 3200 if (!ctx->is_pa20 && a->d) { 3201 return false; 3202 } 3203 nullify_over(ctx); 3204 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), 3205 a->c, a->f, a->d, a->n, a->disp); 3206 } 3207 3208 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a) 3209 { 3210 if (!ctx->is_pa20 && a->d) { 3211 return false; 3212 } 3213 nullify_over(ctx); 3214 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), 3215 a->c, a->f, a->d, a->n, a->disp); 3216 } 3217 3218 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1, 3219 unsigned c, unsigned f, unsigned n, int disp) 3220 { 3221 TCGv_reg dest, in2, sv, cb_cond; 3222 DisasCond cond; 3223 bool d = false; 3224 3225 /* 3226 * For hppa64, the ADDB conditions change with PSW.W, 3227 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE. 3228 */ 3229 if (ctx->tb_flags & PSW_W) { 3230 d = c >= 5; 3231 if (d) { 3232 c &= 3; 3233 } 3234 } 3235 3236 in2 = load_gpr(ctx, r); 3237 dest = tcg_temp_new(); 3238 sv = NULL; 3239 cb_cond = NULL; 3240 3241 if (cond_need_cb(c)) { 3242 TCGv_reg cb = tcg_temp_new(); 3243 TCGv_reg cb_msb = tcg_temp_new(); 3244 3245 tcg_gen_movi_reg(cb_msb, 0); 3246 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb); 3247 tcg_gen_xor_reg(cb, in1, in2); 3248 tcg_gen_xor_reg(cb, cb, dest); 3249 cb_cond = get_carry(ctx, d, cb, cb_msb); 3250 } else { 3251 tcg_gen_add_reg(dest, in1, in2); 3252 } 3253 if (cond_need_sv(c)) { 3254 sv = do_add_sv(ctx, dest, in1, in2); 3255 } 3256 3257 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv); 3258 save_gpr(ctx, r, dest); 3259 return do_cbranch(ctx, disp, n, &cond); 3260 } 3261 3262 static bool trans_addb(DisasContext *ctx, arg_addb *a) 3263 { 3264 nullify_over(ctx); 3265 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp); 3266 } 3267 3268 static bool trans_addbi(DisasContext *ctx, arg_addbi *a) 3269 { 3270 nullify_over(ctx); 3271 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp); 3272 } 3273 3274 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a) 3275 { 3276 TCGv_reg tmp, tcg_r; 3277 DisasCond cond; 3278 3279 nullify_over(ctx); 3280 3281 tmp = tcg_temp_new(); 3282 tcg_r = load_gpr(ctx, a->r); 3283 if (cond_need_ext(ctx, a->d)) { 3284 /* Force shift into [32,63] */ 3285 tcg_gen_ori_reg(tmp, cpu_sar, 32); 3286 tcg_gen_shl_reg(tmp, tcg_r, tmp); 3287 } else { 3288 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar); 3289 } 3290 3291 cond = cond_make_0_tmp(a->c ? 
TCG_COND_GE : TCG_COND_LT, tmp); 3292 return do_cbranch(ctx, a->disp, a->n, &cond); 3293 } 3294 3295 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a) 3296 { 3297 TCGv_reg tmp, tcg_r; 3298 DisasCond cond; 3299 int p; 3300 3301 nullify_over(ctx); 3302 3303 tmp = tcg_temp_new(); 3304 tcg_r = load_gpr(ctx, a->r); 3305 p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0); 3306 tcg_gen_shli_reg(tmp, tcg_r, p); 3307 3308 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp); 3309 return do_cbranch(ctx, a->disp, a->n, &cond); 3310 } 3311 3312 static bool trans_movb(DisasContext *ctx, arg_movb *a) 3313 { 3314 TCGv_reg dest; 3315 DisasCond cond; 3316 3317 nullify_over(ctx); 3318 3319 dest = dest_gpr(ctx, a->r2); 3320 if (a->r1 == 0) { 3321 tcg_gen_movi_reg(dest, 0); 3322 } else { 3323 tcg_gen_mov_reg(dest, cpu_gr[a->r1]); 3324 } 3325 3326 /* All MOVB conditions are 32-bit. */ 3327 cond = do_sed_cond(ctx, a->c, false, dest); 3328 return do_cbranch(ctx, a->disp, a->n, &cond); 3329 } 3330 3331 static bool trans_movbi(DisasContext *ctx, arg_movbi *a) 3332 { 3333 TCGv_reg dest; 3334 DisasCond cond; 3335 3336 nullify_over(ctx); 3337 3338 dest = dest_gpr(ctx, a->r); 3339 tcg_gen_movi_reg(dest, a->i); 3340 3341 /* All MOVBI conditions are 32-bit. */ 3342 cond = do_sed_cond(ctx, a->c, false, dest); 3343 return do_cbranch(ctx, a->disp, a->n, &cond); 3344 } 3345 3346 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a) 3347 { 3348 TCGv_reg dest, src2; 3349 3350 if (!ctx->is_pa20 && a->d) { 3351 return false; 3352 } 3353 if (a->c) { 3354 nullify_over(ctx); 3355 } 3356 3357 dest = dest_gpr(ctx, a->t); 3358 src2 = load_gpr(ctx, a->r2); 3359 if (a->r1 == 0) { 3360 if (a->d) { 3361 tcg_gen_shr_reg(dest, src2, cpu_sar); 3362 } else { 3363 TCGv_reg tmp = tcg_temp_new(); 3364 3365 tcg_gen_ext32u_reg(dest, src2); 3366 tcg_gen_andi_reg(tmp, cpu_sar, 31); 3367 tcg_gen_shr_reg(dest, dest, tmp); 3368 } 3369 } else if (a->r1 == a->r2) { 3370 if (a->d) { 3371 tcg_gen_rotr_reg(dest, src2, cpu_sar); 3372 } else { 3373 TCGv_i32 t32 = tcg_temp_new_i32(); 3374 TCGv_i32 s32 = tcg_temp_new_i32(); 3375 3376 tcg_gen_trunc_reg_i32(t32, src2); 3377 tcg_gen_trunc_reg_i32(s32, cpu_sar); 3378 tcg_gen_andi_i32(s32, s32, 31); 3379 tcg_gen_rotr_i32(t32, t32, s32); 3380 tcg_gen_extu_i32_reg(dest, t32); 3381 } 3382 } else { 3383 TCGv_reg src1 = load_gpr(ctx, a->r1); 3384 3385 if (a->d) { 3386 TCGv_reg t = tcg_temp_new(); 3387 TCGv_reg n = tcg_temp_new(); 3388 3389 tcg_gen_xori_reg(n, cpu_sar, 63); 3390 tcg_gen_shl_reg(t, src2, n); 3391 tcg_gen_shli_reg(t, t, 1); 3392 tcg_gen_shr_reg(dest, src1, cpu_sar); 3393 tcg_gen_or_reg(dest, dest, t); 3394 } else { 3395 TCGv_i64 t = tcg_temp_new_i64(); 3396 TCGv_i64 s = tcg_temp_new_i64(); 3397 3398 tcg_gen_concat_reg_i64(t, src2, src1); 3399 tcg_gen_extu_reg_i64(s, cpu_sar); 3400 tcg_gen_andi_i64(s, s, 31); 3401 tcg_gen_shr_i64(t, t, s); 3402 tcg_gen_trunc_i64_reg(dest, t); 3403 } 3404 } 3405 save_gpr(ctx, a->t, dest); 3406 3407 /* Install the new nullification. */ 3408 cond_free(&ctx->null_cond); 3409 if (a->c) { 3410 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); 3411 } 3412 return nullify_end(ctx); 3413 } 3414 3415 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a) 3416 { 3417 unsigned width, sa; 3418 TCGv_reg dest, t2; 3419 3420 if (!ctx->is_pa20 && a->d) { 3421 return false; 3422 } 3423 if (a->c) { 3424 nullify_over(ctx); 3425 } 3426 3427 width = a->d ? 
64 : 32; 3428 sa = width - 1 - a->cpos; 3429 3430 dest = dest_gpr(ctx, a->t); 3431 t2 = load_gpr(ctx, a->r2); 3432 if (a->r1 == 0) { 3433 tcg_gen_extract_reg(dest, t2, sa, width - sa); 3434 } else if (width == TARGET_REGISTER_BITS) { 3435 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa); 3436 } else { 3437 assert(!a->d); 3438 if (a->r1 == a->r2) { 3439 TCGv_i32 t32 = tcg_temp_new_i32(); 3440 tcg_gen_trunc_reg_i32(t32, t2); 3441 tcg_gen_rotri_i32(t32, t32, sa); 3442 tcg_gen_extu_i32_reg(dest, t32); 3443 } else { 3444 TCGv_i64 t64 = tcg_temp_new_i64(); 3445 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]); 3446 tcg_gen_shri_i64(t64, t64, sa); 3447 tcg_gen_trunc_i64_reg(dest, t64); 3448 } 3449 } 3450 save_gpr(ctx, a->t, dest); 3451 3452 /* Install the new nullification. */ 3453 cond_free(&ctx->null_cond); 3454 if (a->c) { 3455 ctx->null_cond = do_sed_cond(ctx, a->c, false, dest); 3456 } 3457 return nullify_end(ctx); 3458 } 3459 3460 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a) 3461 { 3462 unsigned widthm1 = a->d ? 63 : 31; 3463 TCGv_reg dest, src, tmp; 3464 3465 if (!ctx->is_pa20 && a->d) { 3466 return false; 3467 } 3468 if (a->c) { 3469 nullify_over(ctx); 3470 } 3471 3472 dest = dest_gpr(ctx, a->t); 3473 src = load_gpr(ctx, a->r); 3474 tmp = tcg_temp_new(); 3475 3476 /* Recall that SAR is using big-endian bit numbering. */ 3477 tcg_gen_andi_reg(tmp, cpu_sar, widthm1); 3478 tcg_gen_xori_reg(tmp, tmp, widthm1); 3479 3480 if (a->se) { 3481 if (!a->d) { 3482 tcg_gen_ext32s_reg(dest, src); 3483 src = dest; 3484 } 3485 tcg_gen_sar_reg(dest, src, tmp); 3486 tcg_gen_sextract_reg(dest, dest, 0, a->len); 3487 } else { 3488 if (!a->d) { 3489 tcg_gen_ext32u_reg(dest, src); 3490 src = dest; 3491 } 3492 tcg_gen_shr_reg(dest, src, tmp); 3493 tcg_gen_extract_reg(dest, dest, 0, a->len); 3494 } 3495 save_gpr(ctx, a->t, dest); 3496 3497 /* Install the new nullification. */ 3498 cond_free(&ctx->null_cond); 3499 if (a->c) { 3500 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3501 } 3502 return nullify_end(ctx); 3503 } 3504 3505 static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a) 3506 { 3507 unsigned len, cpos, width; 3508 TCGv_reg dest, src; 3509 3510 if (!ctx->is_pa20 && a->d) { 3511 return false; 3512 } 3513 if (a->c) { 3514 nullify_over(ctx); 3515 } 3516 3517 len = a->len; 3518 width = a->d ? 64 : 32; 3519 cpos = width - 1 - a->pos; 3520 if (cpos + len > width) { 3521 len = width - cpos; 3522 } 3523 3524 dest = dest_gpr(ctx, a->t); 3525 src = load_gpr(ctx, a->r); 3526 if (a->se) { 3527 tcg_gen_sextract_reg(dest, src, cpos, len); 3528 } else { 3529 tcg_gen_extract_reg(dest, src, cpos, len); 3530 } 3531 save_gpr(ctx, a->t, dest); 3532 3533 /* Install the new nullification. */ 3534 cond_free(&ctx->null_cond); 3535 if (a->c) { 3536 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3537 } 3538 return nullify_end(ctx); 3539 } 3540 3541 static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a) 3542 { 3543 unsigned len, width; 3544 target_sreg mask0, mask1; 3545 TCGv_reg dest; 3546 3547 if (!ctx->is_pa20 && a->d) { 3548 return false; 3549 } 3550 if (a->c) { 3551 nullify_over(ctx); 3552 } 3553 3554 len = a->len; 3555 width = a->d ? 
64 : 32; 3556 if (a->cpos + len > width) { 3557 len = width - a->cpos; 3558 } 3559 3560 dest = dest_gpr(ctx, a->t); 3561 mask0 = deposit64(0, a->cpos, len, a->i); 3562 mask1 = deposit64(-1, a->cpos, len, a->i); 3563 3564 if (a->nz) { 3565 TCGv_reg src = load_gpr(ctx, a->t); 3566 tcg_gen_andi_reg(dest, src, mask1); 3567 tcg_gen_ori_reg(dest, dest, mask0); 3568 } else { 3569 tcg_gen_movi_reg(dest, mask0); 3570 } 3571 save_gpr(ctx, a->t, dest); 3572 3573 /* Install the new nullification. */ 3574 cond_free(&ctx->null_cond); 3575 if (a->c) { 3576 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3577 } 3578 return nullify_end(ctx); 3579 } 3580 3581 static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a) 3582 { 3583 unsigned rs = a->nz ? a->t : 0; 3584 unsigned len, width; 3585 TCGv_reg dest, val; 3586 3587 if (!ctx->is_pa20 && a->d) { 3588 return false; 3589 } 3590 if (a->c) { 3591 nullify_over(ctx); 3592 } 3593 3594 len = a->len; 3595 width = a->d ? 64 : 32; 3596 if (a->cpos + len > width) { 3597 len = width - a->cpos; 3598 } 3599 3600 dest = dest_gpr(ctx, a->t); 3601 val = load_gpr(ctx, a->r); 3602 if (rs == 0) { 3603 tcg_gen_deposit_z_reg(dest, val, a->cpos, len); 3604 } else { 3605 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len); 3606 } 3607 save_gpr(ctx, a->t, dest); 3608 3609 /* Install the new nullification. */ 3610 cond_free(&ctx->null_cond); 3611 if (a->c) { 3612 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest); 3613 } 3614 return nullify_end(ctx); 3615 } 3616 3617 static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c, 3618 bool d, bool nz, unsigned len, TCGv_reg val) 3619 { 3620 unsigned rs = nz ? rt : 0; 3621 unsigned widthm1 = d ? 63 : 31; 3622 TCGv_reg mask, tmp, shift, dest; 3623 target_ureg msb = 1ULL << (len - 1); 3624 3625 dest = dest_gpr(ctx, rt); 3626 shift = tcg_temp_new(); 3627 tmp = tcg_temp_new(); 3628 3629 /* Convert big-endian bit numbering in SAR to left-shift. */ 3630 tcg_gen_andi_reg(shift, cpu_sar, widthm1); 3631 tcg_gen_xori_reg(shift, shift, widthm1); 3632 3633 mask = tcg_temp_new(); 3634 tcg_gen_movi_reg(mask, msb + (msb - 1)); 3635 tcg_gen_and_reg(tmp, val, mask); 3636 if (rs) { 3637 tcg_gen_shl_reg(mask, mask, shift); 3638 tcg_gen_shl_reg(tmp, tmp, shift); 3639 tcg_gen_andc_reg(dest, cpu_gr[rs], mask); 3640 tcg_gen_or_reg(dest, dest, tmp); 3641 } else { 3642 tcg_gen_shl_reg(dest, tmp, shift); 3643 } 3644 save_gpr(ctx, rt, dest); 3645 3646 /* Install the new nullification. */ 3647 cond_free(&ctx->null_cond); 3648 if (c) { 3649 ctx->null_cond = do_sed_cond(ctx, c, d, dest); 3650 } 3651 return nullify_end(ctx); 3652 } 3653 3654 static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a) 3655 { 3656 if (!ctx->is_pa20 && a->d) { 3657 return false; 3658 } 3659 if (a->c) { 3660 nullify_over(ctx); 3661 } 3662 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len, 3663 load_gpr(ctx, a->r)); 3664 } 3665 3666 static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a) 3667 { 3668 if (!ctx->is_pa20 && a->d) { 3669 return false; 3670 } 3671 if (a->c) { 3672 nullify_over(ctx); 3673 } 3674 return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len, 3675 tcg_constant_reg(a->i)); 3676 } 3677 3678 static bool trans_be(DisasContext *ctx, arg_be *a) 3679 { 3680 TCGv_reg tmp; 3681 3682 #ifdef CONFIG_USER_ONLY 3683 /* ??? It seems like there should be a good way of using 3684 "be disp(sr2, r0)", the canonical gateway entry mechanism 3685 to our advantage. But that appears to be inconvenient to 3686 manage along side branch delay slots. 
Therefore we handle 3687 entry into the gateway page via absolute address. */ 3688 /* Since we don't implement spaces, just branch. Do notice the special 3689 case of "be disp(*,r0)" using a direct branch to disp, so that we can 3690 goto_tb to the TB containing the syscall. */ 3691 if (a->b == 0) { 3692 return do_dbranch(ctx, a->disp, a->l, a->n); 3693 } 3694 #else 3695 nullify_over(ctx); 3696 #endif 3697 3698 tmp = tcg_temp_new(); 3699 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp); 3700 tmp = do_ibranch_priv(ctx, tmp); 3701 3702 #ifdef CONFIG_USER_ONLY 3703 return do_ibranch(ctx, tmp, a->l, a->n); 3704 #else 3705 TCGv_i64 new_spc = tcg_temp_new_i64(); 3706 3707 load_spr(ctx, new_spc, a->sp); 3708 if (a->l) { 3709 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var); 3710 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f); 3711 } 3712 if (a->n && use_nullify_skip(ctx)) { 3713 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp); 3714 tcg_gen_addi_reg(tmp, tmp, 4); 3715 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); 3716 tcg_gen_mov_i64(cpu_iasq_f, new_spc); 3717 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f); 3718 } else { 3719 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 3720 if (ctx->iaoq_b == -1) { 3721 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 3722 } 3723 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp); 3724 tcg_gen_mov_i64(cpu_iasq_b, new_spc); 3725 nullify_set(ctx, a->n); 3726 } 3727 tcg_gen_lookup_and_goto_ptr(); 3728 ctx->base.is_jmp = DISAS_NORETURN; 3729 return nullify_end(ctx); 3730 #endif 3731 } 3732 3733 static bool trans_bl(DisasContext *ctx, arg_bl *a) 3734 { 3735 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n); 3736 } 3737 3738 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a) 3739 { 3740 target_ureg dest = iaoq_dest(ctx, a->disp); 3741 3742 nullify_over(ctx); 3743 3744 /* Make sure the caller hasn't done something weird with the queue. 3745 * ??? This is not quite the same as the PSW[B] bit, which would be 3746 * expensive to track. Real hardware will trap for 3747 * b gateway 3748 * b gateway+4 (in delay slot of first branch) 3749 * However, checking for a non-sequential instruction queue *will* 3750 * diagnose the security hole 3751 * b gateway 3752 * b evil 3753 * in which instructions at evil would run with increased privs. 3754 */ 3755 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) { 3756 return gen_illegal(ctx); 3757 } 3758 3759 #ifndef CONFIG_USER_ONLY 3760 if (ctx->tb_flags & PSW_C) { 3761 CPUHPPAState *env = cpu_env(ctx->cs); 3762 int type = hppa_artype_for_page(env, ctx->base.pc_next); 3763 /* If we could not find a TLB entry, then we need to generate an 3764 ITLB miss exception so the kernel will provide it. 3765 The resulting TLB fill operation will invalidate this TB and 3766 we will re-translate, at which point we *will* be able to find 3767 the TLB entry and determine if this is in fact a gateway page. */ 3768 if (type < 0) { 3769 gen_excp(ctx, EXCP_ITLB_MISS); 3770 return true; 3771 } 3772 /* No change for non-gateway pages or for priv decrease. 
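Access rights types 4..7 denote gateway pages that promote execution to privilege level type - 4, so only install the new level when it is more privileged than the current one.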
*/ 3773 if (type >= 4 && type - 4 < ctx->privilege) { 3774 dest = deposit32(dest, 0, 2, type - 4); 3775 } 3776 } else { 3777 dest &= -4; /* priv = 0 */ 3778 } 3779 #endif 3780 3781 if (a->l) { 3782 TCGv_reg tmp = dest_gpr(ctx, a->l); 3783 if (ctx->privilege < 3) { 3784 tcg_gen_andi_reg(tmp, tmp, -4); 3785 } 3786 tcg_gen_ori_reg(tmp, tmp, ctx->privilege); 3787 save_gpr(ctx, a->l, tmp); 3788 } 3789 3790 return do_dbranch(ctx, dest, 0, a->n); 3791 } 3792 3793 static bool trans_blr(DisasContext *ctx, arg_blr *a) 3794 { 3795 if (a->x) { 3796 TCGv_reg tmp = tcg_temp_new(); 3797 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3); 3798 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8); 3799 /* The computation here never changes privilege level. */ 3800 return do_ibranch(ctx, tmp, a->l, a->n); 3801 } else { 3802 /* BLR R0,RX is a good way to load PC+8 into RX. */ 3803 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n); 3804 } 3805 } 3806 3807 static bool trans_bv(DisasContext *ctx, arg_bv *a) 3808 { 3809 TCGv_reg dest; 3810 3811 if (a->x == 0) { 3812 dest = load_gpr(ctx, a->b); 3813 } else { 3814 dest = tcg_temp_new(); 3815 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3); 3816 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b)); 3817 } 3818 dest = do_ibranch_priv(ctx, dest); 3819 return do_ibranch(ctx, dest, 0, a->n); 3820 } 3821 3822 static bool trans_bve(DisasContext *ctx, arg_bve *a) 3823 { 3824 TCGv_reg dest; 3825 3826 #ifdef CONFIG_USER_ONLY 3827 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); 3828 return do_ibranch(ctx, dest, a->l, a->n); 3829 #else 3830 nullify_over(ctx); 3831 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b)); 3832 3833 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b); 3834 if (ctx->iaoq_b == -1) { 3835 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b); 3836 } 3837 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest); 3838 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest)); 3839 if (a->l) { 3840 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var); 3841 } 3842 nullify_set(ctx, a->n); 3843 tcg_gen_lookup_and_goto_ptr(); 3844 ctx->base.is_jmp = DISAS_NORETURN; 3845 return nullify_end(ctx); 3846 #endif 3847 } 3848 3849 static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a) 3850 { 3851 /* All branch target stack instructions implement as nop. 
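They only exist on PA 2.0, so the encoding is rejected on earlier CPUs.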
*/ 3852 return ctx->is_pa20; 3853 } 3854 3855 /* 3856 * Float class 0 3857 */ 3858 3859 static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3860 { 3861 tcg_gen_mov_i32(dst, src); 3862 } 3863 3864 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a) 3865 { 3866 uint64_t ret; 3867 3868 if (TARGET_REGISTER_BITS == 64) { 3869 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */ 3870 } else { 3871 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */ 3872 } 3873 3874 nullify_over(ctx); 3875 save_frd(0, tcg_constant_i64(ret)); 3876 return nullify_end(ctx); 3877 } 3878 3879 static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a) 3880 { 3881 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f); 3882 } 3883 3884 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3885 { 3886 tcg_gen_mov_i64(dst, src); 3887 } 3888 3889 static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a) 3890 { 3891 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d); 3892 } 3893 3894 static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3895 { 3896 tcg_gen_andi_i32(dst, src, INT32_MAX); 3897 } 3898 3899 static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a) 3900 { 3901 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f); 3902 } 3903 3904 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3905 { 3906 tcg_gen_andi_i64(dst, src, INT64_MAX); 3907 } 3908 3909 static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a) 3910 { 3911 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d); 3912 } 3913 3914 static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a) 3915 { 3916 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s); 3917 } 3918 3919 static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a) 3920 { 3921 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d); 3922 } 3923 3924 static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a) 3925 { 3926 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s); 3927 } 3928 3929 static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a) 3930 { 3931 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d); 3932 } 3933 3934 static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3935 { 3936 tcg_gen_xori_i32(dst, src, INT32_MIN); 3937 } 3938 3939 static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a) 3940 { 3941 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f); 3942 } 3943 3944 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3945 { 3946 tcg_gen_xori_i64(dst, src, INT64_MIN); 3947 } 3948 3949 static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a) 3950 { 3951 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d); 3952 } 3953 3954 static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src) 3955 { 3956 tcg_gen_ori_i32(dst, src, INT32_MIN); 3957 } 3958 3959 static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a) 3960 { 3961 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f); 3962 } 3963 3964 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src) 3965 { 3966 tcg_gen_ori_i64(dst, src, INT64_MIN); 3967 } 3968 3969 static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a) 3970 { 3971 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d); 3972 } 3973 3974 /* 3975 * Float class 1 3976 */ 3977 3978 static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a) 3979 { 3980 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s); 3981 } 3982 3983 static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a) 3984 { 3985 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d); 
3986 } 3987 3988 static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a) 3989 { 3990 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s); 3991 } 3992 3993 static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a) 3994 { 3995 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s); 3996 } 3997 3998 static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a) 3999 { 4000 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d); 4001 } 4002 4003 static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a) 4004 { 4005 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d); 4006 } 4007 4008 static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a) 4009 { 4010 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w); 4011 } 4012 4013 static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a) 4014 { 4015 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w); 4016 } 4017 4018 static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a) 4019 { 4020 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw); 4021 } 4022 4023 static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a) 4024 { 4025 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw); 4026 } 4027 4028 static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a) 4029 { 4030 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w); 4031 } 4032 4033 static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a) 4034 { 4035 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w); 4036 } 4037 4038 static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a) 4039 { 4040 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw); 4041 } 4042 4043 static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a) 4044 { 4045 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw); 4046 } 4047 4048 static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a) 4049 { 4050 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s); 4051 } 4052 4053 static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a) 4054 { 4055 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s); 4056 } 4057 4058 static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a) 4059 { 4060 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d); 4061 } 4062 4063 static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a) 4064 { 4065 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d); 4066 } 4067 4068 static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a) 4069 { 4070 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw); 4071 } 4072 4073 static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a) 4074 { 4075 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw); 4076 } 4077 4078 static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a) 4079 { 4080 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw); 4081 } 4082 4083 static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a) 4084 { 4085 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw); 4086 } 4087 4088 static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a) 4089 { 4090 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw); 4091 } 4092 4093 static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a) 4094 { 4095 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw); 4096 } 4097 4098 static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a) 4099 { 4100 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw); 4101 } 4102 4103 static bool trans_fcnv_t_d_uq(DisasContext *ctx, 

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

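        /*
         * Each mask in the switch below combines bit 26 (0x4000000,
         * the bit tested by the "simple" form, presumably the FPSR C
         * bit) with the top bits of what appears to be the compare
         * result queue starting at bit 21: 11 queue bits for
         * "acc"/"rej", 7 for "acc8"/"rej8", 5 for "acc6", 3 for
         * "acc4" and 1 for "acc2".
         */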
        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = tcg_constant_reg(mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
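
/*
 * The arithmetic above maps r = 0..15 to 16 + r and r = 16..31 to
 * 48 + (r & 15); presumably these denote fr16..fr31, with the extra
 * encoding bit selecting the right word half in this file's
 * single-precision register numbering.
 */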

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
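    /*
     * For example, assuming 4 KiB pages (TARGET_PAGE_MASK == -4096),
     * pc_first == 0x1f80 gives -(0x1f80 | -4096) == 128 bytes, i.e.
     * room for 32 more instructions before the page boundary.
     */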
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new();
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}