/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fcmpeq16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al      ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au      ({ qemu_build_not_reached(); NULL; })
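/*
 * The stubs above and below let code shared between sparc32 and sparc64
 * reference helpers that are only compiled for one of the two targets.
 * qemu_build_not_reached() fails the build if such a call survives
 * optimization, so every use must be dominated by a test that is
 * constant-false for the excluded target, e.g. (hypothetical):
 *
 *     if (avail_64(dc)) {
 *         gen_helper_rdccr(dst, tcg_env);  // elided on sparc32 builds
 *     }
 */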
# define gen_helper_fmul8x16        ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16     ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge         ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist           ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                 0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;
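/*
 * dc->pc and dc->npc normally hold real (4-byte aligned) guest addresses;
 * low bits set instead mark one of the sentinel values above.  E.g.
 * (illustrative) dc->pc == 0x40001000 is a known static address, while
 * dc->npc == JUMP_PC means the next PC is one of jump_pc[0..1] depending
 * on dc->jump.  This is why the code below tests "dc->npc & 3" to
 * distinguish static from dynamic PCs.
 */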
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b)     sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b)  sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1 << 13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
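/*
 * Worked example for the register-number decoding above (illustrative):
 * on sparc64, bit 0 of the double-register field encodes bit 5 of the
 * register number, so DFPREG(3) = ((3 & 1) << 5) | (3 & 0x1e) = 34,
 * i.e. %f34.  Storage-wise, cpu_fpr[] packs each even/odd pair of
 * 32-bit registers into one 64-bit element: %f0 is the high half of
 * cpu_fpr[0] and %f1 the low half, which is exactly what
 * gen_load_fpr_F() below extracts.
 */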
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    src = QFPREG(src);
    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    dst = DFPREG(dst);
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
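/*
 * Example (illustrative): with PSTATE.AM set on a 64-bit CPU, AM_CHECK()
 * is true and a computed address such as 0xffffffff80001000 is masked
 * down to 0x80001000, per the V9 rule that the upper 32 address bits
 * are ignored.  address_mask_i() below is the translation-time
 * counterpart for addresses already known as constants.
 */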
static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
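/*
 * Worked example for the flags representation above (illustrative,
 * 32-bit): 0x80000000 + 0x80000000 = 0 with carry out, giving
 *   cpu_cc_N = cpu_cc_Z = result = 0   (Z is encoded as "cc_Z == 0")
 *   cpu_cc_C = unsigned carry    = 1
 *   cpu_cc_V = (N ^ src2) & ~(src1 ^ src2) = 0x80000000
 * so the sign bit of cc_V is set: both addends negative, sum is not,
 * i.e. signed overflow.
 */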
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
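/*
 * MULScc is one step of the V8 iterative 32x32->64 multiply: the
 * multiplier sits in %y, shifting right each step, while the partial
 * product accumulates in rd.  A classic software multiply is roughly
 * (illustrative):
 *
 *     wr     %o1, %g0, %y      ! multiplier into %y
 *     andcc  %g0, %g0, %o4     ! clear partial product and N/V
 *     mulscc %o4, %o0, %o4     ! repeated 32 times
 *     ...
 *     mulscc %o4, %g0, %o4     ! final shift step
 *
 * leaving the high word in %o4 and the low word in %y.
 */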
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
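/*
 * FALIGNDATA below uses GSR.align (the low 3 bits of %gsr) as a byte
 * offset into the 16-byte concatenation s1:s2, returning 8 bytes from
 * that offset.  E.g. with align == 3 (illustrative) the result is the
 * low 5 bytes of s1 followed by the high 3 bytes of s2, computed as
 * (s1 << 24) | (s2 >> 40).
 */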
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* Call this function before using the condition register, as it may
   have been set for a jump.  */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
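/*
 * Exceptions that depend on a runtime test (e.g. alignment checks)
 * cannot simply call gen_exception(), since TCG ops for the remainder
 * of the insn still follow.  Instead the test branches to a label
 * allocated here; at the end of the TB each entry on delay_excp_list
 * has its label bound and the exception raised with the pc/npc saved
 * below.  Usage sketch (see gen_check_align()):
 *
 *     lab = delay_exception(dc, TT_UNALIGNED);
 *     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
 */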
static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
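/*
 * gen_compare() below rebuilds a TCG condition from the lazily
 * maintained flag words.  For example (illustrative), after subcc a
 * "bgu" test (cond 0xc, greater unsigned, i.e. !(Z | C)) arrives as
 * cond&7 == 0x4 with bit 3 set: the Z|C expression is built as an EQ
 * test and the final tcg_invert_cond() flips it to NE.
 */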
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     *   0 =
     *   1 <
     *   2 >
     *   3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;
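/*
 * Every alternate-space memory insn funnels through resolve_asi().
 * E.g. (illustrative) "lda [%o0] 0x0b, %o1" carries an immediate ASI
 * in the insn, while the v9 "lda [%o0] %asi, %o1" form is passed in
 * as asi == -2 and picks up dc->asi, the %asi register value that was
 * copied into the TB flags.
 */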
/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged. */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged. */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:            /* Bypass */
        case ASI_REAL_IO:         /* Bypass, non-cacheable */
        case ASI_REAL_L:          /* Bypass LE */
        case ASI_REAL_IO_L:       /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:      /* Real address, twinx */
        case ASI_TWINX_REAL_L:    /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:   /* Nucleus */
        case ASI_NL:  /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:   /* As if user primary */
        case ASI_AIUPL:  /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:   /* As if user secondary */
        case ASI_AIUSL:  /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:   /* Secondary */
        case ASI_SL:  /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:   /* Primary */
        case ASI_PL:  /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
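/*
 * So e.g. (illustrative) "lduba [%o0] ASI_USERDATA, %o1" on sparc32
 * resolves to GET_ASI_DIRECT with mem_idx == MMU_USER_IDX and becomes
 * a single tcg_gen_qemu_ld_tl(), while an ASI with side effects (MMU
 * registers, etc.) falls into the default case and goes through the
 * per-access helper instead.
 */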
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4-byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
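/*
 * The read-modify-write ASIs map directly onto TCG atomics above: SWAP
 * becomes tcg_gen_atomic_xchg_tl() and CASA/CASXA become
 * tcg_gen_atomic_cmpxchg_tl().  E.g. (illustrative)
 * "casa [%o0] ASI_P, %o1, %o2" compares the word at [%o0] with %o1,
 * stores %o2 there if they match, and always leaves the old memory
 * value in %o2 -- atomically, even under MTTCG.
 */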
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only. */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_BCOPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}

static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
#else
    qemu_build_not_reached();
#endif
}
#ifdef TARGET_SPARC64
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be of the form 2^n - 1 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif

static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}

static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
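/*
 * Each decodetree pattern expands through TRANS() into a trans_NAME()
 * that gates on availability before the real translator runs, e.g.
 * (illustrative expansion):
 *
 *     static bool trans_Bicc(DisasContext *dc, arg_Bicc *a)
 *     { return avail_ALL(dc) && do_bpcc(dc, a); }
 *
 * A v9-only pattern thus returns false on sparc32 (avail_64 is
 * constant false) and the insn is treated as illegal.
 */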
*/ 2037 static bool advance_pc(DisasContext *dc) 2038 { 2039 TCGLabel *l1; 2040 2041 finishing_insn(dc); 2042 2043 if (dc->npc & 3) { 2044 switch (dc->npc) { 2045 case DYNAMIC_PC: 2046 case DYNAMIC_PC_LOOKUP: 2047 dc->pc = dc->npc; 2048 tcg_gen_mov_tl(cpu_pc, cpu_npc); 2049 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); 2050 break; 2051 2052 case JUMP_PC: 2053 /* we can do a static jump */ 2054 l1 = gen_new_label(); 2055 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1); 2056 2057 /* jump not taken */ 2058 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4); 2059 2060 /* jump taken */ 2061 gen_set_label(l1); 2062 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4); 2063 2064 dc->base.is_jmp = DISAS_NORETURN; 2065 break; 2066 2067 default: 2068 g_assert_not_reached(); 2069 } 2070 } else { 2071 dc->pc = dc->npc; 2072 dc->npc = dc->npc + 4; 2073 } 2074 return true; 2075 } 2076 2077 /* 2078 * Major opcodes 00 and 01 -- branches, call, and sethi 2079 */ 2080 2081 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp, 2082 bool annul, int disp) 2083 { 2084 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4); 2085 target_ulong npc; 2086 2087 finishing_insn(dc); 2088 2089 if (cmp->cond == TCG_COND_ALWAYS) { 2090 if (annul) { 2091 dc->pc = dest; 2092 dc->npc = dest + 4; 2093 } else { 2094 gen_mov_pc_npc(dc); 2095 dc->npc = dest; 2096 } 2097 return true; 2098 } 2099 2100 if (cmp->cond == TCG_COND_NEVER) { 2101 npc = dc->npc; 2102 if (npc & 3) { 2103 gen_mov_pc_npc(dc); 2104 if (annul) { 2105 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4); 2106 } 2107 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4); 2108 } else { 2109 dc->pc = npc + (annul ? 4 : 0); 2110 dc->npc = dc->pc + 4; 2111 } 2112 return true; 2113 } 2114 2115 flush_cond(dc); 2116 npc = dc->npc; 2117 2118 if (annul) { 2119 TCGLabel *l1 = gen_new_label(); 2120 2121 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1); 2122 gen_goto_tb(dc, 0, npc, dest); 2123 gen_set_label(l1); 2124 gen_goto_tb(dc, 1, npc + 4, npc + 8); 2125 2126 dc->base.is_jmp = DISAS_NORETURN; 2127 } else { 2128 if (npc & 3) { 2129 switch (npc) { 2130 case DYNAMIC_PC: 2131 case DYNAMIC_PC_LOOKUP: 2132 tcg_gen_mov_tl(cpu_pc, cpu_npc); 2133 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); 2134 tcg_gen_movcond_tl(cmp->cond, cpu_npc, 2135 cmp->c1, tcg_constant_tl(cmp->c2), 2136 tcg_constant_tl(dest), cpu_npc); 2137 dc->pc = npc; 2138 break; 2139 default: 2140 g_assert_not_reached(); 2141 } 2142 } else { 2143 dc->pc = npc; 2144 dc->npc = JUMP_PC; 2145 dc->jump = *cmp; 2146 dc->jump_pc[0] = dest; 2147 dc->jump_pc[1] = npc + 4; 2148 2149 /* The condition for cpu_cond is always NE -- normalize. 
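   That is, leave cpu_cond holding a value that is nonzero exactly when
   the branch is taken: for TCG_COND_NE, c1 ^ c2 already has that form;
   any other condition is materialized as a 0/1 setcond.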
*/ 2150 if (cmp->cond == TCG_COND_NE) { 2151 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2); 2152 } else { 2153 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2); 2154 } 2155 dc->cpu_cond_live = true; 2156 } 2157 } 2158 return true; 2159 } 2160 2161 static bool raise_priv(DisasContext *dc) 2162 { 2163 gen_exception(dc, TT_PRIV_INSN); 2164 return true; 2165 } 2166 2167 static bool raise_unimpfpop(DisasContext *dc) 2168 { 2169 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP); 2170 return true; 2171 } 2172 2173 static bool gen_trap_float128(DisasContext *dc) 2174 { 2175 if (dc->def->features & CPU_FEATURE_FLOAT128) { 2176 return false; 2177 } 2178 return raise_unimpfpop(dc); 2179 } 2180 2181 static bool do_bpcc(DisasContext *dc, arg_bcc *a) 2182 { 2183 DisasCompare cmp; 2184 2185 gen_compare(&cmp, a->cc, a->cond, dc); 2186 return advance_jump_cond(dc, &cmp, a->a, a->i); 2187 } 2188 2189 TRANS(Bicc, ALL, do_bpcc, a) 2190 TRANS(BPcc, 64, do_bpcc, a) 2191 2192 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a) 2193 { 2194 DisasCompare cmp; 2195 2196 if (gen_trap_ifnofpu(dc)) { 2197 return true; 2198 } 2199 gen_fcompare(&cmp, a->cc, a->cond); 2200 return advance_jump_cond(dc, &cmp, a->a, a->i); 2201 } 2202 2203 TRANS(FBPfcc, 64, do_fbpfcc, a) 2204 TRANS(FBfcc, ALL, do_fbpfcc, a) 2205 2206 static bool trans_BPr(DisasContext *dc, arg_BPr *a) 2207 { 2208 DisasCompare cmp; 2209 2210 if (!avail_64(dc)) { 2211 return false; 2212 } 2213 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) { 2214 return false; 2215 } 2216 return advance_jump_cond(dc, &cmp, a->a, a->i); 2217 } 2218 2219 static bool trans_CALL(DisasContext *dc, arg_CALL *a) 2220 { 2221 target_long target = address_mask_i(dc, dc->pc + a->i * 4); 2222 2223 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc)); 2224 gen_mov_pc_npc(dc); 2225 dc->npc = target; 2226 return true; 2227 } 2228 2229 static bool trans_NCP(DisasContext *dc, arg_NCP *a) 2230 { 2231 /* 2232 * For sparc32, always generate the no-coprocessor exception. 2233 * For sparc64, always generate illegal instruction. 2234 */ 2235 #ifdef TARGET_SPARC64 2236 return false; 2237 #else 2238 gen_exception(dc, TT_NCP_INSN); 2239 return true; 2240 #endif 2241 } 2242 2243 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a) 2244 { 2245 /* Special-case %g0 because that's the canonical nop. */ 2246 if (a->rd) { 2247 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10)); 2248 } 2249 return advance_pc(dc); 2250 } 2251 2252 /* 2253 * Major Opcode 10 -- integer, floating-point, vis, and system insns. 2254 */ 2255 2256 static bool do_tcc(DisasContext *dc, int cond, int cc, 2257 int rs1, bool imm, int rs2_or_imm) 2258 { 2259 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) 2260 ? UA2005_HTRAP_MASK : V8_TRAP_MASK); 2261 DisasCompare cmp; 2262 TCGLabel *lab; 2263 TCGv_i32 trap; 2264 2265 /* Trap never. */ 2266 if (cond == 0) { 2267 return advance_pc(dc); 2268 } 2269 2270 /* 2271 * Immediate traps are the most common case. Since this value is 2272 * live across the branch, it really pays to evaluate the constant. 
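 * E.g. "ta 0x10" (rs1 = %g0, immediate 0x10) folds at translate time
 * into the single constant (0x10 & mask) + TT_TRAP.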
2273 */ 2274 if (rs1 == 0 && (imm || rs2_or_imm == 0)) { 2275 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP); 2276 } else { 2277 trap = tcg_temp_new_i32(); 2278 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1)); 2279 if (imm) { 2280 tcg_gen_addi_i32(trap, trap, rs2_or_imm); 2281 } else { 2282 TCGv_i32 t2 = tcg_temp_new_i32(); 2283 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm)); 2284 tcg_gen_add_i32(trap, trap, t2); 2285 } 2286 tcg_gen_andi_i32(trap, trap, mask); 2287 tcg_gen_addi_i32(trap, trap, TT_TRAP); 2288 } 2289 2290 finishing_insn(dc); 2291 2292 /* Trap always. */ 2293 if (cond == 8) { 2294 save_state(dc); 2295 gen_helper_raise_exception(tcg_env, trap); 2296 dc->base.is_jmp = DISAS_NORETURN; 2297 return true; 2298 } 2299 2300 /* Conditional trap. */ 2301 flush_cond(dc); 2302 lab = delay_exceptionv(dc, trap); 2303 gen_compare(&cmp, cc, cond, dc); 2304 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab); 2305 2306 return advance_pc(dc); 2307 } 2308 2309 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a) 2310 { 2311 if (avail_32(dc) && a->cc) { 2312 return false; 2313 } 2314 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2); 2315 } 2316 2317 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a) 2318 { 2319 if (avail_64(dc)) { 2320 return false; 2321 } 2322 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i); 2323 } 2324 2325 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a) 2326 { 2327 if (avail_32(dc)) { 2328 return false; 2329 } 2330 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i); 2331 } 2332 2333 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a) 2334 { 2335 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); 2336 return advance_pc(dc); 2337 } 2338 2339 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a) 2340 { 2341 if (avail_32(dc)) { 2342 return false; 2343 } 2344 if (a->mmask) { 2345 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */ 2346 tcg_gen_mb(a->mmask | TCG_BAR_SC); 2347 } 2348 if (a->cmask) { 2349 /* For #Sync, etc, end the TB to recognize interrupts. */ 2350 dc->base.is_jmp = DISAS_EXIT; 2351 } 2352 return advance_pc(dc); 2353 } 2354 2355 static bool do_rd_special(DisasContext *dc, bool priv, int rd, 2356 TCGv (*func)(DisasContext *, TCGv)) 2357 { 2358 if (!priv) { 2359 return raise_priv(dc); 2360 } 2361 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd))); 2362 return advance_pc(dc); 2363 } 2364 2365 static TCGv do_rdy(DisasContext *dc, TCGv dst) 2366 { 2367 return cpu_y; 2368 } 2369 2370 static bool trans_RDY(DisasContext *dc, arg_RDY *a) 2371 { 2372 /* 2373 * TODO: Need a feature bit for sparcv8. In the meantime, treat all 2374 * 32-bit cpus like sparcv7, which ignores the rs1 field. 2375 * This matches after all other ASR, so Leon3 Asr17 is handled first. 2376 */ 2377 if (avail_64(dc) && a->rs1 != 0) { 2378 return false; 2379 } 2380 return do_rd_special(dc, true, a->rd, do_rdy); 2381 } 2382 2383 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst) 2384 { 2385 uint32_t val; 2386 2387 /* 2388 * TODO: There are many more fields to be filled, 2389 * some of which are writable. 
2390 */ 2391 val = dc->def->nwindows - 1; /* [4:0] NWIN */ 2392 val |= 1 << 8; /* [8] V8 */ 2393 2394 return tcg_constant_tl(val); 2395 } 2396 2397 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config) 2398 2399 static TCGv do_rdccr(DisasContext *dc, TCGv dst) 2400 { 2401 gen_helper_rdccr(dst, tcg_env); 2402 return dst; 2403 } 2404 2405 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr) 2406 2407 static TCGv do_rdasi(DisasContext *dc, TCGv dst) 2408 { 2409 #ifdef TARGET_SPARC64 2410 return tcg_constant_tl(dc->asi); 2411 #else 2412 qemu_build_not_reached(); 2413 #endif 2414 } 2415 2416 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi) 2417 2418 static TCGv do_rdtick(DisasContext *dc, TCGv dst) 2419 { 2420 TCGv_ptr r_tickptr = tcg_temp_new_ptr(); 2421 2422 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); 2423 if (translator_io_start(&dc->base)) { 2424 dc->base.is_jmp = DISAS_EXIT; 2425 } 2426 gen_helper_tick_get_count(dst, tcg_env, r_tickptr, 2427 tcg_constant_i32(dc->mem_idx)); 2428 return dst; 2429 } 2430 2431 /* TODO: non-priv access only allowed when enabled. */ 2432 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick) 2433 2434 static TCGv do_rdpc(DisasContext *dc, TCGv dst) 2435 { 2436 return tcg_constant_tl(address_mask_i(dc, dc->pc)); 2437 } 2438 2439 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc) 2440 2441 static TCGv do_rdfprs(DisasContext *dc, TCGv dst) 2442 { 2443 tcg_gen_ext_i32_tl(dst, cpu_fprs); 2444 return dst; 2445 } 2446 2447 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs) 2448 2449 static TCGv do_rdgsr(DisasContext *dc, TCGv dst) 2450 { 2451 gen_trap_ifnofpu(dc); 2452 return cpu_gsr; 2453 } 2454 2455 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr) 2456 2457 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst) 2458 { 2459 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint)); 2460 return dst; 2461 } 2462 2463 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint) 2464 2465 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst) 2466 { 2467 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr)); 2468 return dst; 2469 } 2470 2471 /* TODO: non-priv access only allowed when enabled. */ 2472 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr) 2473 2474 static TCGv do_rdstick(DisasContext *dc, TCGv dst) 2475 { 2476 TCGv_ptr r_tickptr = tcg_temp_new_ptr(); 2477 2478 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick)); 2479 if (translator_io_start(&dc->base)) { 2480 dc->base.is_jmp = DISAS_EXIT; 2481 } 2482 gen_helper_tick_get_count(dst, tcg_env, r_tickptr, 2483 tcg_constant_i32(dc->mem_idx)); 2484 return dst; 2485 } 2486 2487 /* TODO: non-priv access only allowed when enabled. */ 2488 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick) 2489 2490 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst) 2491 { 2492 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr)); 2493 return dst; 2494 } 2495 2496 /* TODO: supervisor access only allowed when enabled by hypervisor. */ 2497 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr) 2498 2499 /* 2500 * UltraSPARC-T1 Strand status. 2501 * HYPV check maybe not enough, UA2005 & UA2007 describe 2502 * this ASR as impl. 
dep 2503 */ 2504 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst) 2505 { 2506 return tcg_constant_tl(1); 2507 } 2508 2509 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status) 2510 2511 static TCGv do_rdpsr(DisasContext *dc, TCGv dst) 2512 { 2513 gen_helper_rdpsr(dst, tcg_env); 2514 return dst; 2515 } 2516 2517 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr) 2518 2519 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst) 2520 { 2521 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate)); 2522 return dst; 2523 } 2524 2525 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate) 2526 2527 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst) 2528 { 2529 TCGv_i32 tl = tcg_temp_new_i32(); 2530 TCGv_ptr tp = tcg_temp_new_ptr(); 2531 2532 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl)); 2533 tcg_gen_andi_i32(tl, tl, MAXTL_MASK); 2534 tcg_gen_shli_i32(tl, tl, 3); 2535 tcg_gen_ext_i32_ptr(tp, tl); 2536 tcg_gen_add_ptr(tp, tp, tcg_env); 2537 2538 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate)); 2539 return dst; 2540 } 2541 2542 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate) 2543 2544 static TCGv do_rdhintp(DisasContext *dc, TCGv dst) 2545 { 2546 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp)); 2547 return dst; 2548 } 2549 2550 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp) 2551 2552 static TCGv do_rdhtba(DisasContext *dc, TCGv dst) 2553 { 2554 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba)); 2555 return dst; 2556 } 2557 2558 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba) 2559 2560 static TCGv do_rdhver(DisasContext *dc, TCGv dst) 2561 { 2562 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver)); 2563 return dst; 2564 } 2565 2566 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver) 2567 2568 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst) 2569 { 2570 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr)); 2571 return dst; 2572 } 2573 2574 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd, 2575 do_rdhstick_cmpr) 2576 2577 static TCGv do_rdwim(DisasContext *dc, TCGv dst) 2578 { 2579 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim)); 2580 return dst; 2581 } 2582 2583 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim) 2584 2585 static TCGv do_rdtpc(DisasContext *dc, TCGv dst) 2586 { 2587 #ifdef TARGET_SPARC64 2588 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2589 2590 gen_load_trap_state_at_tl(r_tsptr); 2591 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc)); 2592 return dst; 2593 #else 2594 qemu_build_not_reached(); 2595 #endif 2596 } 2597 2598 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc) 2599 2600 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst) 2601 { 2602 #ifdef TARGET_SPARC64 2603 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2604 2605 gen_load_trap_state_at_tl(r_tsptr); 2606 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc)); 2607 return dst; 2608 #else 2609 qemu_build_not_reached(); 2610 #endif 2611 } 2612 2613 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc) 2614 2615 static TCGv do_rdtstate(DisasContext *dc, TCGv dst) 2616 { 2617 #ifdef TARGET_SPARC64 2618 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2619 2620 gen_load_trap_state_at_tl(r_tsptr); 2621 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate)); 2622 return dst; 2623 #else 2624 qemu_build_not_reached(); 2625 #endif 2626 } 2627 2628 
TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate) 2629 2630 static TCGv do_rdtt(DisasContext *dc, TCGv dst) 2631 { 2632 #ifdef TARGET_SPARC64 2633 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2634 2635 gen_load_trap_state_at_tl(r_tsptr); 2636 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt)); 2637 return dst; 2638 #else 2639 qemu_build_not_reached(); 2640 #endif 2641 } 2642 2643 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt) 2644 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick) 2645 2646 static TCGv do_rdtba(DisasContext *dc, TCGv dst) 2647 { 2648 return cpu_tbr; 2649 } 2650 2651 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba) 2652 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba) 2653 2654 static TCGv do_rdpstate(DisasContext *dc, TCGv dst) 2655 { 2656 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate)); 2657 return dst; 2658 } 2659 2660 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate) 2661 2662 static TCGv do_rdtl(DisasContext *dc, TCGv dst) 2663 { 2664 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl)); 2665 return dst; 2666 } 2667 2668 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl) 2669 2670 static TCGv do_rdpil(DisasContext *dc, TCGv dst) 2671 { 2672 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil)); 2673 return dst; 2674 } 2675 2676 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil) 2677 2678 static TCGv do_rdcwp(DisasContext *dc, TCGv dst) 2679 { 2680 gen_helper_rdcwp(dst, tcg_env); 2681 return dst; 2682 } 2683 2684 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp) 2685 2686 static TCGv do_rdcansave(DisasContext *dc, TCGv dst) 2687 { 2688 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave)); 2689 return dst; 2690 } 2691 2692 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave) 2693 2694 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst) 2695 { 2696 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore)); 2697 return dst; 2698 } 2699 2700 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd, 2701 do_rdcanrestore) 2702 2703 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst) 2704 { 2705 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin)); 2706 return dst; 2707 } 2708 2709 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin) 2710 2711 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst) 2712 { 2713 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin)); 2714 return dst; 2715 } 2716 2717 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin) 2718 2719 static TCGv do_rdwstate(DisasContext *dc, TCGv dst) 2720 { 2721 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate)); 2722 return dst; 2723 } 2724 2725 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate) 2726 2727 static TCGv do_rdgl(DisasContext *dc, TCGv dst) 2728 { 2729 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl)); 2730 return dst; 2731 } 2732 2733 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl) 2734 2735 /* UA2005 strand status */ 2736 static TCGv do_rdssr(DisasContext *dc, TCGv dst) 2737 { 2738 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr)); 2739 return dst; 2740 } 2741 2742 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr) 2743 2744 static TCGv do_rdver(DisasContext *dc, TCGv dst) 2745 { 2746 tcg_gen_ld_tl(dst, 
tcg_env, env64_field_offsetof(version)); 2747 return dst; 2748 } 2749 2750 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver) 2751 2752 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a) 2753 { 2754 if (avail_64(dc)) { 2755 gen_helper_flushw(tcg_env); 2756 return advance_pc(dc); 2757 } 2758 return false; 2759 } 2760 2761 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv, 2762 void (*func)(DisasContext *, TCGv)) 2763 { 2764 TCGv src; 2765 2766 /* For simplicity, we under-decoded the rs2 form. */ 2767 if (!a->imm && (a->rs2_or_imm & ~0x1f)) { 2768 return false; 2769 } 2770 if (!priv) { 2771 return raise_priv(dc); 2772 } 2773 2774 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) { 2775 src = tcg_constant_tl(a->rs2_or_imm); 2776 } else { 2777 TCGv src1 = gen_load_gpr(dc, a->rs1); 2778 if (a->rs2_or_imm == 0) { 2779 src = src1; 2780 } else { 2781 src = tcg_temp_new(); 2782 if (a->imm) { 2783 tcg_gen_xori_tl(src, src1, a->rs2_or_imm); 2784 } else { 2785 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm)); 2786 } 2787 } 2788 } 2789 func(dc, src); 2790 return advance_pc(dc); 2791 } 2792 2793 static void do_wry(DisasContext *dc, TCGv src) 2794 { 2795 tcg_gen_ext32u_tl(cpu_y, src); 2796 } 2797 2798 TRANS(WRY, ALL, do_wr_special, a, true, do_wry) 2799 2800 static void do_wrccr(DisasContext *dc, TCGv src) 2801 { 2802 gen_helper_wrccr(tcg_env, src); 2803 } 2804 2805 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr) 2806 2807 static void do_wrasi(DisasContext *dc, TCGv src) 2808 { 2809 TCGv tmp = tcg_temp_new(); 2810 2811 tcg_gen_ext8u_tl(tmp, src); 2812 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi)); 2813 /* End TB to notice changed ASI. */ 2814 dc->base.is_jmp = DISAS_EXIT; 2815 } 2816 2817 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi) 2818 2819 static void do_wrfprs(DisasContext *dc, TCGv src) 2820 { 2821 #ifdef TARGET_SPARC64 2822 tcg_gen_trunc_tl_i32(cpu_fprs, src); 2823 dc->fprs_dirty = 0; 2824 dc->base.is_jmp = DISAS_EXIT; 2825 #else 2826 qemu_build_not_reached(); 2827 #endif 2828 } 2829 2830 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs) 2831 2832 static void do_wrgsr(DisasContext *dc, TCGv src) 2833 { 2834 gen_trap_ifnofpu(dc); 2835 tcg_gen_mov_tl(cpu_gsr, src); 2836 } 2837 2838 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr) 2839 2840 static void do_wrsoftint_set(DisasContext *dc, TCGv src) 2841 { 2842 gen_helper_set_softint(tcg_env, src); 2843 } 2844 2845 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set) 2846 2847 static void do_wrsoftint_clr(DisasContext *dc, TCGv src) 2848 { 2849 gen_helper_clear_softint(tcg_env, src); 2850 } 2851 2852 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr) 2853 2854 static void do_wrsoftint(DisasContext *dc, TCGv src) 2855 { 2856 gen_helper_write_softint(tcg_env, src); 2857 } 2858 2859 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint) 2860 2861 static void do_wrtick_cmpr(DisasContext *dc, TCGv src) 2862 { 2863 TCGv_ptr r_tickptr = tcg_temp_new_ptr(); 2864 2865 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr)); 2866 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); 2867 translator_io_start(&dc->base); 2868 gen_helper_tick_set_limit(r_tickptr, src); 2869 /* End TB to handle timer interrupt */ 2870 dc->base.is_jmp = DISAS_EXIT; 2871 } 2872 2873 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr) 2874 2875 static void do_wrstick(DisasContext *dc, TCGv src) 2876 { 2877 
#ifdef TARGET_SPARC64 2878 TCGv_ptr r_tickptr = tcg_temp_new_ptr(); 2879 2880 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick)); 2881 translator_io_start(&dc->base); 2882 gen_helper_tick_set_count(r_tickptr, src); 2883 /* End TB to handle timer interrupt */ 2884 dc->base.is_jmp = DISAS_EXIT; 2885 #else 2886 qemu_build_not_reached(); 2887 #endif 2888 } 2889 2890 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick) 2891 2892 static void do_wrstick_cmpr(DisasContext *dc, TCGv src) 2893 { 2894 TCGv_ptr r_tickptr = tcg_temp_new_ptr(); 2895 2896 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr)); 2897 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick)); 2898 translator_io_start(&dc->base); 2899 gen_helper_tick_set_limit(r_tickptr, src); 2900 /* End TB to handle timer interrupt */ 2901 dc->base.is_jmp = DISAS_EXIT; 2902 } 2903 2904 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr) 2905 2906 static void do_wrpowerdown(DisasContext *dc, TCGv src) 2907 { 2908 finishing_insn(dc); 2909 save_state(dc); 2910 gen_helper_power_down(tcg_env); 2911 } 2912 2913 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown) 2914 2915 static void do_wrpsr(DisasContext *dc, TCGv src) 2916 { 2917 gen_helper_wrpsr(tcg_env, src); 2918 dc->base.is_jmp = DISAS_EXIT; 2919 } 2920 2921 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr) 2922 2923 static void do_wrwim(DisasContext *dc, TCGv src) 2924 { 2925 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows); 2926 TCGv tmp = tcg_temp_new(); 2927 2928 tcg_gen_andi_tl(tmp, src, mask); 2929 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim)); 2930 } 2931 2932 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim) 2933 2934 static void do_wrtpc(DisasContext *dc, TCGv src) 2935 { 2936 #ifdef TARGET_SPARC64 2937 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2938 2939 gen_load_trap_state_at_tl(r_tsptr); 2940 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc)); 2941 #else 2942 qemu_build_not_reached(); 2943 #endif 2944 } 2945 2946 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc) 2947 2948 static void do_wrtnpc(DisasContext *dc, TCGv src) 2949 { 2950 #ifdef TARGET_SPARC64 2951 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2952 2953 gen_load_trap_state_at_tl(r_tsptr); 2954 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc)); 2955 #else 2956 qemu_build_not_reached(); 2957 #endif 2958 } 2959 2960 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc) 2961 2962 static void do_wrtstate(DisasContext *dc, TCGv src) 2963 { 2964 #ifdef TARGET_SPARC64 2965 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2966 2967 gen_load_trap_state_at_tl(r_tsptr); 2968 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate)); 2969 #else 2970 qemu_build_not_reached(); 2971 #endif 2972 } 2973 2974 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate) 2975 2976 static void do_wrtt(DisasContext *dc, TCGv src) 2977 { 2978 #ifdef TARGET_SPARC64 2979 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 2980 2981 gen_load_trap_state_at_tl(r_tsptr); 2982 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt)); 2983 #else 2984 qemu_build_not_reached(); 2985 #endif 2986 } 2987 2988 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt) 2989 2990 static void do_wrtick(DisasContext *dc, TCGv src) 2991 { 2992 TCGv_ptr r_tickptr = tcg_temp_new_ptr(); 2993 2994 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick)); 2995 translator_io_start(&dc->base); 2996 
gen_helper_tick_set_count(r_tickptr, src); 2997 /* End TB to handle timer interrupt */ 2998 dc->base.is_jmp = DISAS_EXIT; 2999 } 3000 3001 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick) 3002 3003 static void do_wrtba(DisasContext *dc, TCGv src) 3004 { 3005 tcg_gen_mov_tl(cpu_tbr, src); 3006 } 3007 3008 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba) 3009 3010 static void do_wrpstate(DisasContext *dc, TCGv src) 3011 { 3012 save_state(dc); 3013 if (translator_io_start(&dc->base)) { 3014 dc->base.is_jmp = DISAS_EXIT; 3015 } 3016 gen_helper_wrpstate(tcg_env, src); 3017 dc->npc = DYNAMIC_PC; 3018 } 3019 3020 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate) 3021 3022 static void do_wrtl(DisasContext *dc, TCGv src) 3023 { 3024 save_state(dc); 3025 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl)); 3026 dc->npc = DYNAMIC_PC; 3027 } 3028 3029 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl) 3030 3031 static void do_wrpil(DisasContext *dc, TCGv src) 3032 { 3033 if (translator_io_start(&dc->base)) { 3034 dc->base.is_jmp = DISAS_EXIT; 3035 } 3036 gen_helper_wrpil(tcg_env, src); 3037 } 3038 3039 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil) 3040 3041 static void do_wrcwp(DisasContext *dc, TCGv src) 3042 { 3043 gen_helper_wrcwp(tcg_env, src); 3044 } 3045 3046 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp) 3047 3048 static void do_wrcansave(DisasContext *dc, TCGv src) 3049 { 3050 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave)); 3051 } 3052 3053 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave) 3054 3055 static void do_wrcanrestore(DisasContext *dc, TCGv src) 3056 { 3057 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore)); 3058 } 3059 3060 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore) 3061 3062 static void do_wrcleanwin(DisasContext *dc, TCGv src) 3063 { 3064 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin)); 3065 } 3066 3067 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin) 3068 3069 static void do_wrotherwin(DisasContext *dc, TCGv src) 3070 { 3071 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin)); 3072 } 3073 3074 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin) 3075 3076 static void do_wrwstate(DisasContext *dc, TCGv src) 3077 { 3078 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate)); 3079 } 3080 3081 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate) 3082 3083 static void do_wrgl(DisasContext *dc, TCGv src) 3084 { 3085 gen_helper_wrgl(tcg_env, src); 3086 } 3087 3088 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl) 3089 3090 /* UA2005 strand status */ 3091 static void do_wrssr(DisasContext *dc, TCGv src) 3092 { 3093 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr)); 3094 } 3095 3096 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr) 3097 3098 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba) 3099 3100 static void do_wrhpstate(DisasContext *dc, TCGv src) 3101 { 3102 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate)); 3103 dc->base.is_jmp = DISAS_EXIT; 3104 } 3105 3106 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate) 3107 3108 static void do_wrhtstate(DisasContext *dc, TCGv src) 3109 { 3110 TCGv_i32 tl = tcg_temp_new_i32(); 3111 TCGv_ptr tp = tcg_temp_new_ptr(); 3112 3113 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl)); 3114 
tcg_gen_andi_i32(tl, tl, MAXTL_MASK); 3115 tcg_gen_shli_i32(tl, tl, 3); 3116 tcg_gen_ext_i32_ptr(tp, tl); 3117 tcg_gen_add_ptr(tp, tp, tcg_env); 3118 3119 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate)); 3120 } 3121 3122 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate) 3123 3124 static void do_wrhintp(DisasContext *dc, TCGv src) 3125 { 3126 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp)); 3127 } 3128 3129 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp) 3130 3131 static void do_wrhtba(DisasContext *dc, TCGv src) 3132 { 3133 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba)); 3134 } 3135 3136 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba) 3137 3138 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src) 3139 { 3140 TCGv_ptr r_tickptr = tcg_temp_new_ptr(); 3141 3142 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr)); 3143 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick)); 3144 translator_io_start(&dc->base); 3145 gen_helper_tick_set_limit(r_tickptr, src); 3146 /* End TB to handle timer interrupt */ 3147 dc->base.is_jmp = DISAS_EXIT; 3148 } 3149 3150 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc), 3151 do_wrhstick_cmpr) 3152 3153 static bool do_saved_restored(DisasContext *dc, bool saved) 3154 { 3155 if (!supervisor(dc)) { 3156 return raise_priv(dc); 3157 } 3158 if (saved) { 3159 gen_helper_saved(tcg_env); 3160 } else { 3161 gen_helper_restored(tcg_env); 3162 } 3163 return advance_pc(dc); 3164 } 3165 3166 TRANS(SAVED, 64, do_saved_restored, true) 3167 TRANS(RESTORED, 64, do_saved_restored, false) 3168 3169 static bool trans_NOP(DisasContext *dc, arg_NOP *a) 3170 { 3171 return advance_pc(dc); 3172 } 3173 3174 /* 3175 * TODO: Need a feature bit for sparcv8. 3176 * In the meantime, treat all 32-bit cpus like sparcv7. 3177 */ 3178 TRANS(NOP_v7, 32, trans_NOP, a) 3179 TRANS(NOP_v9, 64, trans_NOP, a) 3180 3181 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, 3182 void (*func)(TCGv, TCGv, TCGv), 3183 void (*funci)(TCGv, TCGv, target_long), 3184 bool logic_cc) 3185 { 3186 TCGv dst, src1; 3187 3188 /* For simplicity, we under-decoded the rs2 form. 
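   The decode field keeps all 13 bits of the operand even when i = 0,
   but only the low 5 bits can name a register, so reject set high bits.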
*/ 3189 if (!a->imm && a->rs2_or_imm & ~0x1f) { 3190 return false; 3191 } 3192 3193 if (logic_cc) { 3194 dst = cpu_cc_N; 3195 } else { 3196 dst = gen_dest_gpr(dc, a->rd); 3197 } 3198 src1 = gen_load_gpr(dc, a->rs1); 3199 3200 if (a->imm || a->rs2_or_imm == 0) { 3201 if (funci) { 3202 funci(dst, src1, a->rs2_or_imm); 3203 } else { 3204 func(dst, src1, tcg_constant_tl(a->rs2_or_imm)); 3205 } 3206 } else { 3207 func(dst, src1, cpu_regs[a->rs2_or_imm]); 3208 } 3209 3210 if (logic_cc) { 3211 if (TARGET_LONG_BITS == 64) { 3212 tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N); 3213 tcg_gen_movi_tl(cpu_icc_C, 0); 3214 } 3215 tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N); 3216 tcg_gen_movi_tl(cpu_cc_C, 0); 3217 tcg_gen_movi_tl(cpu_cc_V, 0); 3218 } 3219 3220 gen_store_gpr(dc, a->rd, dst); 3221 return advance_pc(dc); 3222 } 3223 3224 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, 3225 void (*func)(TCGv, TCGv, TCGv), 3226 void (*funci)(TCGv, TCGv, target_long), 3227 void (*func_cc)(TCGv, TCGv, TCGv)) 3228 { 3229 if (a->cc) { 3230 return do_arith_int(dc, a, func_cc, NULL, false); 3231 } 3232 return do_arith_int(dc, a, func, funci, false); 3233 } 3234 3235 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a, 3236 void (*func)(TCGv, TCGv, TCGv), 3237 void (*funci)(TCGv, TCGv, target_long)) 3238 { 3239 return do_arith_int(dc, a, func, funci, a->cc); 3240 } 3241 3242 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc) 3243 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc) 3244 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc) 3245 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc) 3246 3247 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc) 3248 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc) 3249 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv) 3250 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv) 3251 3252 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl) 3253 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl) 3254 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL) 3255 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL) 3256 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL) 3257 3258 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL) 3259 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL) 3260 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL) 3261 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc) 3262 3263 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc) 3264 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc) 3265 3266 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */ 3267 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL) 3268 3269 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a) 3270 { 3271 /* OR with %g0 is the canonical alias for MOV. */ 3272 if (!a->cc && a->rs1 == 0) { 3273 if (a->imm || a->rs2_or_imm == 0) { 3274 gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm)); 3275 } else if (a->rs2_or_imm & ~0x1f) { 3276 /* For simplicity, we under-decoded the rs2 form. */ 3277 return false; 3278 } else { 3279 gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]); 3280 } 3281 return advance_pc(dc); 3282 } 3283 return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl); 3284 } 3285 3286 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a) 3287 { 3288 TCGv_i64 t1, t2; 3289 TCGv dst; 3290 3291 if (!avail_DIV(dc)) { 3292 return false; 3293 } 3294 /* For simplicity, we under-decoded the rs2 form. 
*/ 3295 if (!a->imm && a->rs2_or_imm & ~0x1f) { 3296 return false; 3297 } 3298 3299 if (unlikely(a->rs2_or_imm == 0)) { 3300 gen_exception(dc, TT_DIV_ZERO); 3301 return true; 3302 } 3303 3304 if (a->imm) { 3305 t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm); 3306 } else { 3307 TCGLabel *lab; 3308 TCGv_i32 n2; 3309 3310 finishing_insn(dc); 3311 flush_cond(dc); 3312 3313 n2 = tcg_temp_new_i32(); 3314 tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]); 3315 3316 lab = delay_exception(dc, TT_DIV_ZERO); 3317 tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab); 3318 3319 t2 = tcg_temp_new_i64(); 3320 #ifdef TARGET_SPARC64 3321 tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]); 3322 #else 3323 tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]); 3324 #endif 3325 } 3326 3327 t1 = tcg_temp_new_i64(); 3328 tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y); 3329 3330 tcg_gen_divu_i64(t1, t1, t2); 3331 tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX)); 3332 3333 dst = gen_dest_gpr(dc, a->rd); 3334 tcg_gen_trunc_i64_tl(dst, t1); 3335 gen_store_gpr(dc, a->rd, dst); 3336 return advance_pc(dc); 3337 } 3338 3339 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a) 3340 { 3341 TCGv dst, src1, src2; 3342 3343 if (!avail_64(dc)) { 3344 return false; 3345 } 3346 /* For simplicity, we under-decoded the rs2 form. */ 3347 if (!a->imm && a->rs2_or_imm & ~0x1f) { 3348 return false; 3349 } 3350 3351 if (unlikely(a->rs2_or_imm == 0)) { 3352 gen_exception(dc, TT_DIV_ZERO); 3353 return true; 3354 } 3355 3356 if (a->imm) { 3357 src2 = tcg_constant_tl(a->rs2_or_imm); 3358 } else { 3359 TCGLabel *lab; 3360 3361 finishing_insn(dc); 3362 flush_cond(dc); 3363 3364 lab = delay_exception(dc, TT_DIV_ZERO); 3365 src2 = cpu_regs[a->rs2_or_imm]; 3366 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab); 3367 } 3368 3369 dst = gen_dest_gpr(dc, a->rd); 3370 src1 = gen_load_gpr(dc, a->rs1); 3371 3372 tcg_gen_divu_tl(dst, src1, src2); 3373 gen_store_gpr(dc, a->rd, dst); 3374 return advance_pc(dc); 3375 } 3376 3377 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a) 3378 { 3379 TCGv dst, src1, src2; 3380 3381 if (!avail_64(dc)) { 3382 return false; 3383 } 3384 /* For simplicity, we under-decoded the rs2 form. */ 3385 if (!a->imm && a->rs2_or_imm & ~0x1f) { 3386 return false; 3387 } 3388 3389 if (unlikely(a->rs2_or_imm == 0)) { 3390 gen_exception(dc, TT_DIV_ZERO); 3391 return true; 3392 } 3393 3394 dst = gen_dest_gpr(dc, a->rd); 3395 src1 = gen_load_gpr(dc, a->rs1); 3396 3397 if (a->imm) { 3398 if (unlikely(a->rs2_or_imm == -1)) { 3399 tcg_gen_neg_tl(dst, src1); 3400 gen_store_gpr(dc, a->rd, dst); 3401 return advance_pc(dc); 3402 } 3403 src2 = tcg_constant_tl(a->rs2_or_imm); 3404 } else { 3405 TCGLabel *lab; 3406 TCGv t1, t2; 3407 3408 finishing_insn(dc); 3409 flush_cond(dc); 3410 3411 lab = delay_exception(dc, TT_DIV_ZERO); 3412 src2 = cpu_regs[a->rs2_or_imm]; 3413 tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab); 3414 3415 /* 3416 * Need to avoid INT64_MIN / -1, which will trap on x86 host. 3417 * Set SRC2 to 1 as a new divisor, to produce the correct result. 
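 * (The true quotient, +2^63, is unrepresentable; the wrapped result is
 * INT64_MIN, which dividing the same dividend by 1 also yields.)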
3418 */ 3419 t1 = tcg_temp_new(); 3420 t2 = tcg_temp_new(); 3421 tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN); 3422 tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1); 3423 tcg_gen_and_tl(t1, t1, t2); 3424 tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0), 3425 tcg_constant_tl(1), src2); 3426 src2 = t1; 3427 } 3428 3429 tcg_gen_div_tl(dst, src1, src2); 3430 gen_store_gpr(dc, a->rd, dst); 3431 return advance_pc(dc); 3432 } 3433 3434 static bool gen_edge(DisasContext *dc, arg_r_r_r *a, 3435 int width, bool cc, bool left) 3436 { 3437 TCGv dst, s1, s2, lo1, lo2; 3438 uint64_t amask, tabl, tabr; 3439 int shift, imask, omask; 3440 3441 dst = gen_dest_gpr(dc, a->rd); 3442 s1 = gen_load_gpr(dc, a->rs1); 3443 s2 = gen_load_gpr(dc, a->rs2); 3444 3445 if (cc) { 3446 gen_op_subcc(cpu_cc_N, s1, s2); 3447 } 3448 3449 /* 3450 * Theory of operation: there are two tables, left and right (not to 3451 * be confused with the left and right versions of the opcode). These 3452 * are indexed by the low 3 bits of the inputs. To make things "easy", 3453 * these tables are loaded into two constants, TABL and TABR below. 3454 * The operation index = (input & imask) << shift calculates the index 3455 * into the constant, while val = (table >> index) & omask calculates 3456 * the value we're looking for. 3457 */ 3458 switch (width) { 3459 case 8: 3460 imask = 0x7; 3461 shift = 3; 3462 omask = 0xff; 3463 if (left) { 3464 tabl = 0x80c0e0f0f8fcfeffULL; 3465 tabr = 0xff7f3f1f0f070301ULL; 3466 } else { 3467 tabl = 0x0103070f1f3f7fffULL; 3468 tabr = 0xfffefcf8f0e0c080ULL; 3469 } 3470 break; 3471 case 16: 3472 imask = 0x6; 3473 shift = 1; 3474 omask = 0xf; 3475 if (left) { 3476 tabl = 0x8cef; 3477 tabr = 0xf731; 3478 } else { 3479 tabl = 0x137f; 3480 tabr = 0xfec8; 3481 } 3482 break; 3483 case 32: 3484 imask = 0x4; 3485 shift = 0; 3486 omask = 0x3; 3487 if (left) { 3488 tabl = (2 << 2) | 3; 3489 tabr = (3 << 2) | 1; 3490 } else { 3491 tabl = (1 << 2) | 3; 3492 tabr = (3 << 2) | 2; 3493 } 3494 break; 3495 default: 3496 abort(); 3497 } 3498 3499 lo1 = tcg_temp_new(); 3500 lo2 = tcg_temp_new(); 3501 tcg_gen_andi_tl(lo1, s1, imask); 3502 tcg_gen_andi_tl(lo2, s2, imask); 3503 tcg_gen_shli_tl(lo1, lo1, shift); 3504 tcg_gen_shli_tl(lo2, lo2, shift); 3505 3506 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1); 3507 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2); 3508 tcg_gen_andi_tl(lo1, lo1, omask); 3509 tcg_gen_andi_tl(lo2, lo2, omask); 3510 3511 amask = address_mask_i(dc, -8); 3512 tcg_gen_andi_tl(s1, s1, amask); 3513 tcg_gen_andi_tl(s2, s2, amask); 3514 3515 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). 
*/ 3516 tcg_gen_and_tl(lo2, lo2, lo1); 3517 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2); 3518 3519 gen_store_gpr(dc, a->rd, dst); 3520 return advance_pc(dc); 3521 } 3522 3523 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0) 3524 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1) 3525 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0) 3526 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1) 3527 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0) 3528 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1) 3529 3530 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0) 3531 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1) 3532 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0) 3533 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1) 3534 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0) 3535 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1) 3536 3537 static bool do_rrr(DisasContext *dc, arg_r_r_r *a, 3538 void (*func)(TCGv, TCGv, TCGv)) 3539 { 3540 TCGv dst = gen_dest_gpr(dc, a->rd); 3541 TCGv src1 = gen_load_gpr(dc, a->rs1); 3542 TCGv src2 = gen_load_gpr(dc, a->rs2); 3543 3544 func(dst, src1, src2); 3545 gen_store_gpr(dc, a->rd, dst); 3546 return advance_pc(dc); 3547 } 3548 3549 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8) 3550 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16) 3551 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32) 3552 3553 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2) 3554 { 3555 #ifdef TARGET_SPARC64 3556 TCGv tmp = tcg_temp_new(); 3557 3558 tcg_gen_add_tl(tmp, s1, s2); 3559 tcg_gen_andi_tl(dst, tmp, -8); 3560 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); 3561 #else 3562 g_assert_not_reached(); 3563 #endif 3564 } 3565 3566 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2) 3567 { 3568 #ifdef TARGET_SPARC64 3569 TCGv tmp = tcg_temp_new(); 3570 3571 tcg_gen_add_tl(tmp, s1, s2); 3572 tcg_gen_andi_tl(dst, tmp, -8); 3573 tcg_gen_neg_tl(tmp, tmp); 3574 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); 3575 #else 3576 g_assert_not_reached(); 3577 #endif 3578 } 3579 3580 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr) 3581 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl) 3582 3583 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2) 3584 { 3585 #ifdef TARGET_SPARC64 3586 tcg_gen_add_tl(dst, s1, s2); 3587 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32); 3588 #else 3589 g_assert_not_reached(); 3590 #endif 3591 } 3592 3593 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask) 3594 3595 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u) 3596 { 3597 TCGv dst, src1, src2; 3598 3599 /* Reject 64-bit shifts for sparc32. */ 3600 if (avail_32(dc) && a->x) { 3601 return false; 3602 } 3603 3604 src2 = tcg_temp_new(); 3605 tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31); 3606 src1 = gen_load_gpr(dc, a->rs1); 3607 dst = gen_dest_gpr(dc, a->rd); 3608 3609 if (l) { 3610 tcg_gen_shl_tl(dst, src1, src2); 3611 if (!a->x) { 3612 tcg_gen_ext32u_tl(dst, dst); 3613 } 3614 } else if (u) { 3615 if (!a->x) { 3616 tcg_gen_ext32u_tl(dst, src1); 3617 src1 = dst; 3618 } 3619 tcg_gen_shr_tl(dst, src1, src2); 3620 } else { 3621 if (!a->x) { 3622 tcg_gen_ext32s_tl(dst, src1); 3623 src1 = dst; 3624 } 3625 tcg_gen_sar_tl(dst, src1, src2); 3626 } 3627 gen_store_gpr(dc, a->rd, dst); 3628 return advance_pc(dc); 3629 } 3630 3631 TRANS(SLL_r, ALL, do_shift_r, a, true, true) 3632 TRANS(SRL_r, ALL, do_shift_r, a, false, true) 3633 TRANS(SRA_r, ALL, do_shift_r, a, false, false) 3634 3635 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u) 3636 { 3637 TCGv dst, src1; 3638 3639 /* Reject 64-bit shifts for sparc32. 
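   That includes immediate counts of 32 or more: without the x bit, a
   shift immediate only encodes 0..31.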
*/ 3640 if (avail_32(dc) && (a->x || a->i >= 32)) { 3641 return false; 3642 } 3643 3644 src1 = gen_load_gpr(dc, a->rs1); 3645 dst = gen_dest_gpr(dc, a->rd); 3646 3647 if (avail_32(dc) || a->x) { 3648 if (l) { 3649 tcg_gen_shli_tl(dst, src1, a->i); 3650 } else if (u) { 3651 tcg_gen_shri_tl(dst, src1, a->i); 3652 } else { 3653 tcg_gen_sari_tl(dst, src1, a->i); 3654 } 3655 } else { 3656 if (l) { 3657 tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i); 3658 } else if (u) { 3659 tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i); 3660 } else { 3661 tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i); 3662 } 3663 } 3664 gen_store_gpr(dc, a->rd, dst); 3665 return advance_pc(dc); 3666 } 3667 3668 TRANS(SLL_i, ALL, do_shift_i, a, true, true) 3669 TRANS(SRL_i, ALL, do_shift_i, a, false, true) 3670 TRANS(SRA_i, ALL, do_shift_i, a, false, false) 3671 3672 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm) 3673 { 3674 /* For simplicity, we under-decoded the rs2 form. */ 3675 if (!imm && rs2_or_imm & ~0x1f) { 3676 return NULL; 3677 } 3678 if (imm || rs2_or_imm == 0) { 3679 return tcg_constant_tl(rs2_or_imm); 3680 } else { 3681 return cpu_regs[rs2_or_imm]; 3682 } 3683 } 3684 3685 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2) 3686 { 3687 TCGv dst = gen_load_gpr(dc, rd); 3688 TCGv c2 = tcg_constant_tl(cmp->c2); 3689 3690 tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst); 3691 gen_store_gpr(dc, rd, dst); 3692 return advance_pc(dc); 3693 } 3694 3695 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a) 3696 { 3697 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); 3698 DisasCompare cmp; 3699 3700 if (src2 == NULL) { 3701 return false; 3702 } 3703 gen_compare(&cmp, a->cc, a->cond, dc); 3704 return do_mov_cond(dc, &cmp, a->rd, src2); 3705 } 3706 3707 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a) 3708 { 3709 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); 3710 DisasCompare cmp; 3711 3712 if (src2 == NULL) { 3713 return false; 3714 } 3715 gen_fcompare(&cmp, a->cc, a->cond); 3716 return do_mov_cond(dc, &cmp, a->rd, src2); 3717 } 3718 3719 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a) 3720 { 3721 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm); 3722 DisasCompare cmp; 3723 3724 if (src2 == NULL) { 3725 return false; 3726 } 3727 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) { 3728 return false; 3729 } 3730 return do_mov_cond(dc, &cmp, a->rd, src2); 3731 } 3732 3733 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a, 3734 bool (*func)(DisasContext *dc, int rd, TCGv src)) 3735 { 3736 TCGv src1, sum; 3737 3738 /* For simplicity, we under-decoded the rs2 form. */ 3739 if (!a->imm && a->rs2_or_imm & ~0x1f) { 3740 return false; 3741 } 3742 3743 /* 3744 * Always load the sum into a new temporary. 3745 * This is required to capture the value across a window change, 3746 * e.g. SAVE and RESTORE, and may be optimized away otherwise. 3747 */ 3748 sum = tcg_temp_new(); 3749 src1 = gen_load_gpr(dc, a->rs1); 3750 if (a->imm || a->rs2_or_imm == 0) { 3751 tcg_gen_addi_tl(sum, src1, a->rs2_or_imm); 3752 } else { 3753 tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]); 3754 } 3755 return func(dc, a->rd, sum); 3756 } 3757 3758 static bool do_jmpl(DisasContext *dc, int rd, TCGv src) 3759 { 3760 /* 3761 * Preserve pc across advance, so that we can delay 3762 * the writeback to rd until after src is consumed. 
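 * E.g. "jmpl %g1, %o7" jumps through %g1 and writes the address of the
 * jmpl itself to %o7.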
3763 */ 3764 target_ulong cur_pc = dc->pc; 3765 3766 gen_check_align(dc, src, 3); 3767 3768 gen_mov_pc_npc(dc); 3769 tcg_gen_mov_tl(cpu_npc, src); 3770 gen_address_mask(dc, cpu_npc); 3771 gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc)); 3772 3773 dc->npc = DYNAMIC_PC_LOOKUP; 3774 return true; 3775 } 3776 3777 TRANS(JMPL, ALL, do_add_special, a, do_jmpl) 3778 3779 static bool do_rett(DisasContext *dc, int rd, TCGv src) 3780 { 3781 if (!supervisor(dc)) { 3782 return raise_priv(dc); 3783 } 3784 3785 gen_check_align(dc, src, 3); 3786 3787 gen_mov_pc_npc(dc); 3788 tcg_gen_mov_tl(cpu_npc, src); 3789 gen_helper_rett(tcg_env); 3790 3791 dc->npc = DYNAMIC_PC; 3792 return true; 3793 } 3794 3795 TRANS(RETT, 32, do_add_special, a, do_rett) 3796 3797 static bool do_return(DisasContext *dc, int rd, TCGv src) 3798 { 3799 gen_check_align(dc, src, 3); 3800 gen_helper_restore(tcg_env); 3801 3802 gen_mov_pc_npc(dc); 3803 tcg_gen_mov_tl(cpu_npc, src); 3804 gen_address_mask(dc, cpu_npc); 3805 3806 dc->npc = DYNAMIC_PC_LOOKUP; 3807 return true; 3808 } 3809 3810 TRANS(RETURN, 64, do_add_special, a, do_return) 3811 3812 static bool do_save(DisasContext *dc, int rd, TCGv src) 3813 { 3814 gen_helper_save(tcg_env); 3815 gen_store_gpr(dc, rd, src); 3816 return advance_pc(dc); 3817 } 3818 3819 TRANS(SAVE, ALL, do_add_special, a, do_save) 3820 3821 static bool do_restore(DisasContext *dc, int rd, TCGv src) 3822 { 3823 gen_helper_restore(tcg_env); 3824 gen_store_gpr(dc, rd, src); 3825 return advance_pc(dc); 3826 } 3827 3828 TRANS(RESTORE, ALL, do_add_special, a, do_restore) 3829 3830 static bool do_done_retry(DisasContext *dc, bool done) 3831 { 3832 if (!supervisor(dc)) { 3833 return raise_priv(dc); 3834 } 3835 dc->npc = DYNAMIC_PC; 3836 dc->pc = DYNAMIC_PC; 3837 translator_io_start(&dc->base); 3838 if (done) { 3839 gen_helper_done(tcg_env); 3840 } else { 3841 gen_helper_retry(tcg_env); 3842 } 3843 return true; 3844 } 3845 3846 TRANS(DONE, 64, do_done_retry, true) 3847 TRANS(RETRY, 64, do_done_retry, false) 3848 3849 /* 3850 * Major opcode 11 -- load and store instructions 3851 */ 3852 3853 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm) 3854 { 3855 TCGv addr, tmp = NULL; 3856 3857 /* For simplicity, we under-decoded the rs2 form. 
*/ 3858 if (!imm && rs2_or_imm & ~0x1f) { 3859 return NULL; 3860 } 3861 3862 addr = gen_load_gpr(dc, rs1); 3863 if (rs2_or_imm) { 3864 tmp = tcg_temp_new(); 3865 if (imm) { 3866 tcg_gen_addi_tl(tmp, addr, rs2_or_imm); 3867 } else { 3868 tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]); 3869 } 3870 addr = tmp; 3871 } 3872 if (AM_CHECK(dc)) { 3873 if (!tmp) { 3874 tmp = tcg_temp_new(); 3875 } 3876 tcg_gen_ext32u_tl(tmp, addr); 3877 addr = tmp; 3878 } 3879 return addr; 3880 } 3881 3882 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) 3883 { 3884 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 3885 DisasASI da; 3886 3887 if (addr == NULL) { 3888 return false; 3889 } 3890 da = resolve_asi(dc, a->asi, mop); 3891 3892 reg = gen_dest_gpr(dc, a->rd); 3893 gen_ld_asi(dc, &da, reg, addr); 3894 gen_store_gpr(dc, a->rd, reg); 3895 return advance_pc(dc); 3896 } 3897 3898 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL) 3899 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB) 3900 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW) 3901 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB) 3902 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW) 3903 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL) 3904 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ) 3905 3906 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) 3907 { 3908 TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 3909 DisasASI da; 3910 3911 if (addr == NULL) { 3912 return false; 3913 } 3914 da = resolve_asi(dc, a->asi, mop); 3915 3916 reg = gen_load_gpr(dc, a->rd); 3917 gen_st_asi(dc, &da, reg, addr); 3918 return advance_pc(dc); 3919 } 3920 3921 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL) 3922 TRANS(STB, ALL, do_st_gpr, a, MO_UB) 3923 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW) 3924 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ) 3925 3926 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a) 3927 { 3928 TCGv addr; 3929 DisasASI da; 3930 3931 if (a->rd & 1) { 3932 return false; 3933 } 3934 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 3935 if (addr == NULL) { 3936 return false; 3937 } 3938 da = resolve_asi(dc, a->asi, MO_TEUQ); 3939 gen_ldda_asi(dc, &da, addr, a->rd); 3940 return advance_pc(dc); 3941 } 3942 3943 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a) 3944 { 3945 TCGv addr; 3946 DisasASI da; 3947 3948 if (a->rd & 1) { 3949 return false; 3950 } 3951 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 3952 if (addr == NULL) { 3953 return false; 3954 } 3955 da = resolve_asi(dc, a->asi, MO_TEUQ); 3956 gen_stda_asi(dc, &da, addr, a->rd); 3957 return advance_pc(dc); 3958 } 3959 3960 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a) 3961 { 3962 TCGv addr, reg; 3963 DisasASI da; 3964 3965 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 3966 if (addr == NULL) { 3967 return false; 3968 } 3969 da = resolve_asi(dc, a->asi, MO_UB); 3970 3971 reg = gen_dest_gpr(dc, a->rd); 3972 gen_ldstub_asi(dc, &da, reg, addr); 3973 gen_store_gpr(dc, a->rd, reg); 3974 return advance_pc(dc); 3975 } 3976 3977 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a) 3978 { 3979 TCGv addr, dst, src; 3980 DisasASI da; 3981 3982 addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 3983 if (addr == NULL) { 3984 return false; 3985 } 3986 da = resolve_asi(dc, a->asi, MO_TEUL); 3987 3988 dst = gen_dest_gpr(dc, a->rd); 3989 src = gen_load_gpr(dc, a->rd); 3990 gen_swap_asi(dc, &da, dst, src, addr); 3991 gen_store_gpr(dc, a->rd, dst); 3992 return advance_pc(dc); 3993 } 3994 3995 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop) 
3996 { 3997 TCGv addr, o, n, c; 3998 DisasASI da; 3999 4000 addr = gen_ldst_addr(dc, a->rs1, true, 0); 4001 if (addr == NULL) { 4002 return false; 4003 } 4004 da = resolve_asi(dc, a->asi, mop); 4005 4006 o = gen_dest_gpr(dc, a->rd); 4007 n = gen_load_gpr(dc, a->rd); 4008 c = gen_load_gpr(dc, a->rs2_or_imm); 4009 gen_cas_asi(dc, &da, o, n, c, addr); 4010 gen_store_gpr(dc, a->rd, o); 4011 return advance_pc(dc); 4012 } 4013 4014 TRANS(CASA, CASA, do_casa, a, MO_TEUL) 4015 TRANS(CASXA, 64, do_casa, a, MO_TEUQ) 4016 4017 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) 4018 { 4019 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 4020 DisasASI da; 4021 4022 if (addr == NULL) { 4023 return false; 4024 } 4025 if (gen_trap_ifnofpu(dc)) { 4026 return true; 4027 } 4028 if (sz == MO_128 && gen_trap_float128(dc)) { 4029 return true; 4030 } 4031 da = resolve_asi(dc, a->asi, MO_TE | sz); 4032 gen_ldf_asi(dc, &da, sz, addr, a->rd); 4033 gen_update_fprs_dirty(dc, a->rd); 4034 return advance_pc(dc); 4035 } 4036 4037 TRANS(LDF, ALL, do_ld_fpr, a, MO_32) 4038 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64) 4039 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128) 4040 4041 TRANS(LDFA, 64, do_ld_fpr, a, MO_32) 4042 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64) 4043 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128) 4044 4045 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz) 4046 { 4047 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 4048 DisasASI da; 4049 4050 if (addr == NULL) { 4051 return false; 4052 } 4053 if (gen_trap_ifnofpu(dc)) { 4054 return true; 4055 } 4056 if (sz == MO_128 && gen_trap_float128(dc)) { 4057 return true; 4058 } 4059 da = resolve_asi(dc, a->asi, MO_TE | sz); 4060 gen_stf_asi(dc, &da, sz, addr, a->rd); 4061 return advance_pc(dc); 4062 } 4063 4064 TRANS(STF, ALL, do_st_fpr, a, MO_32) 4065 TRANS(STDF, ALL, do_st_fpr, a, MO_64) 4066 TRANS(STQF, ALL, do_st_fpr, a, MO_128) 4067 4068 TRANS(STFA, 64, do_st_fpr, a, MO_32) 4069 TRANS(STDFA, 64, do_st_fpr, a, MO_64) 4070 TRANS(STQFA, 64, do_st_fpr, a, MO_128) 4071 4072 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a) 4073 { 4074 if (!avail_32(dc)) { 4075 return false; 4076 } 4077 if (!supervisor(dc)) { 4078 return raise_priv(dc); 4079 } 4080 if (gen_trap_ifnofpu(dc)) { 4081 return true; 4082 } 4083 gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR); 4084 return true; 4085 } 4086 4087 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a) 4088 { 4089 TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm); 4090 TCGv_i32 tmp; 4091 4092 if (addr == NULL) { 4093 return false; 4094 } 4095 if (gen_trap_ifnofpu(dc)) { 4096 return true; 4097 } 4098 4099 tmp = tcg_temp_new_i32(); 4100 tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN); 4101 4102 tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2); 4103 /* LDFSR does not change FCC[1-3]. 
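   Those fields live in the high word of the 64-bit FSR and exist only
   on sparc64; LDXFSR below reloads all four.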
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}

static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    return advance_pc(dc);
#else
    return false;
#endif
}

static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)

static bool do_fc(DisasContext *dc, int rd, bool c)
{
    uint64_t mask;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    /*
     * Each element of cpu_fpr[] holds an even/odd pair of f registers:
     * the even register in the high half, the odd one in the low half.
     */
    if (rd & 1) {
        mask = MAKE_64BIT_MASK(0, 32);
    } else {
        mask = MAKE_64BIT_MASK(32, 32);
    }
    if (c) {
        tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
    } else {
        tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
    }
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, 1)

static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)

static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)

static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
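
/*
 * The FPop translators below share a naming scheme: do_<args>, where
 * f, d and q denote 32-, 64- and 128-bit float operands, r a general
 * register, and an env_ prefix marks operations whose helper needs the
 * CPU state (e.g. for rounding mode or IEEE exception reporting).
 */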
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)

static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)

static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)

static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)

static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)

static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
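
/*
 * 128-bit operations can trap in two ways: when the FPU is disabled
 * and when the CPU lacks the float128 feature, hence the paired
 * gen_trap_ifnofpu/gen_trap_float128 checks in the quad helpers.
 */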
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)

static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)

static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)

static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}
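
/*
 * The VIS partitioned adds/subtracts on single (32-bit) registers map
 * directly onto TCG's vector-in-scalar ops: e.g. FPADD16s becomes
 * tcg_gen_vec_add16_i32, two independent 16-bit lanes in one i32.
 */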
TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)

static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
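
/*
 * Unusually for FPops, the VIS FPCMP* comparisons above write an
 * integer destination: each 16- or 32-bit lane comparison contributes
 * one bit to the low bits of r[rd].
 */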
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)

static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)

static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)

static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
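
/*
 * Conditional FP moves come in three flavors that differ only in the
 * source of the condition: FMOVR* tests an integer register against
 * zero, FMOVcc the integer condition codes, and FMOVfcc the FP
 * condition codes.  All three share the same move callbacks.
 */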
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
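
/*
 * Everything below hooks the SPARC decoder into the generic translator
 * loop: per-TB setup, per-insn bookkeeping of the branch-delay state,
 * and the final jump/exit selection at tb_stop.
 */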
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
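
/*
 * tb_stop picks the cheapest exit available: direct chaining when both
 * PC and NPC are compile-time constants, a TB lookup when they are
 * dynamic but safe, and a full exit to the main loop otherwise.  Any
 * delayed exceptions recorded while translating the TB are emitted at
 * the end, each restoring the state of its parent insn first.
 */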
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }
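
    /*
     * Only the global registers %g0..%g7 sit at fixed offsets within
     * CPUSPARCState; the windowed in/local/out registers are reached
     * indirectly through regwptr, which follows the current window.
     */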
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}

void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}