/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"

#include "exec/helper-gen.h"

#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    TCGv c1, c2;
} DisasCompare;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst / 2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

// XXX suboptimal
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);
}

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);
}

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);
}

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
}

static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
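    /* The PSR flags themselves are derived lazily from cc_src/cc_src2/
       cc_dst according to CC_OP (see update_psr); only record them here. */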
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, tcg_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}

static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the subtract that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, tcg_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// N ^ V
static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
}

// C | Z
static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
}

// C
static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
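    /* Extract both FCC bits, then keep FCC0 & !FCC1: the '<' encoding
       in the FCC table above. */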
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
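        /* Materialize the two-way jump_pc choice into cpu_npc so that
           cpu_cond is free to be overwritten afterward. */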
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_env);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}

// Inverted logic
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER, /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER, /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}

#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}

static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}

#else

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}

static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
#endif

static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}

static void gen_fop_FF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static void gen_fop_DD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(tcg_env);
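    /* Quad helpers operate on the QT0/QT1 staging slots in env; fold any
       accrued IEEE exception bits into FSR, which may raise a trap. */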
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fop_FD(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}

static void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(tcg_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
{
    gen_address_mask(dc, addr);
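    /* SWAP is an atomic exchange; MO_ALIGN enforces the required
       word alignment. */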
1776 tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN); 1777 } 1778 1779 static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx) 1780 { 1781 TCGv m1 = tcg_constant_tl(0xff); 1782 gen_address_mask(dc, addr); 1783 tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB); 1784 } 1785 1786 /* asi moves */ 1787 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) 1788 typedef enum { 1789 GET_ASI_HELPER, 1790 GET_ASI_EXCP, 1791 GET_ASI_DIRECT, 1792 GET_ASI_DTWINX, 1793 GET_ASI_BLOCK, 1794 GET_ASI_SHORT, 1795 GET_ASI_BCOPY, 1796 GET_ASI_BFILL, 1797 } ASIType; 1798 1799 typedef struct { 1800 ASIType type; 1801 int asi; 1802 int mem_idx; 1803 MemOp memop; 1804 } DisasASI; 1805 1806 static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop) 1807 { 1808 int asi = GET_FIELD(insn, 19, 26); 1809 ASIType type = GET_ASI_HELPER; 1810 int mem_idx = dc->mem_idx; 1811 1812 #ifndef TARGET_SPARC64 1813 /* Before v9, all asis are immediate and privileged. */ 1814 if (IS_IMM) { 1815 gen_exception(dc, TT_ILL_INSN); 1816 type = GET_ASI_EXCP; 1817 } else if (supervisor(dc) 1818 /* Note that LEON accepts ASI_USERDATA in user mode, for 1819 use with CASA. Also note that previous versions of 1820 QEMU allowed (and old versions of gcc emitted) ASI_P 1821 for LEON, which is incorrect. */ 1822 || (asi == ASI_USERDATA 1823 && (dc->def->features & CPU_FEATURE_CASA))) { 1824 switch (asi) { 1825 case ASI_USERDATA: /* User data access */ 1826 mem_idx = MMU_USER_IDX; 1827 type = GET_ASI_DIRECT; 1828 break; 1829 case ASI_KERNELDATA: /* Supervisor data access */ 1830 mem_idx = MMU_KERNEL_IDX; 1831 type = GET_ASI_DIRECT; 1832 break; 1833 case ASI_M_BYPASS: /* MMU passthrough */ 1834 case ASI_LEON_BYPASS: /* LEON MMU passthrough */ 1835 mem_idx = MMU_PHYS_IDX; 1836 type = GET_ASI_DIRECT; 1837 break; 1838 case ASI_M_BCOPY: /* Block copy, sta access */ 1839 mem_idx = MMU_KERNEL_IDX; 1840 type = GET_ASI_BCOPY; 1841 break; 1842 case ASI_M_BFILL: /* Block fill, stda access */ 1843 mem_idx = MMU_KERNEL_IDX; 1844 type = GET_ASI_BFILL; 1845 break; 1846 } 1847 1848 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the 1849 * permissions check in get_physical_address(..). 1850 */ 1851 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx; 1852 } else { 1853 gen_exception(dc, TT_PRIV_INSN); 1854 type = GET_ASI_EXCP; 1855 } 1856 #else 1857 if (IS_IMM) { 1858 asi = dc->asi; 1859 } 1860 /* With v9, all asis below 0x80 are privileged. */ 1861 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy 1862 down that bit into DisasContext. For the moment that's ok, 1863 since the direct implementations below doesn't have any ASIs 1864 in the restricted [0x30, 0x7f] range, and the check will be 1865 done properly in the helper. 
*/ 1866 if (!supervisor(dc) && asi < 0x80) { 1867 gen_exception(dc, TT_PRIV_ACT); 1868 type = GET_ASI_EXCP; 1869 } else { 1870 switch (asi) { 1871 case ASI_REAL: /* Bypass */ 1872 case ASI_REAL_IO: /* Bypass, non-cacheable */ 1873 case ASI_REAL_L: /* Bypass LE */ 1874 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */ 1875 case ASI_TWINX_REAL: /* Real address, twinx */ 1876 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */ 1877 case ASI_QUAD_LDD_PHYS: 1878 case ASI_QUAD_LDD_PHYS_L: 1879 mem_idx = MMU_PHYS_IDX; 1880 break; 1881 case ASI_N: /* Nucleus */ 1882 case ASI_NL: /* Nucleus LE */ 1883 case ASI_TWINX_N: 1884 case ASI_TWINX_NL: 1885 case ASI_NUCLEUS_QUAD_LDD: 1886 case ASI_NUCLEUS_QUAD_LDD_L: 1887 if (hypervisor(dc)) { 1888 mem_idx = MMU_PHYS_IDX; 1889 } else { 1890 mem_idx = MMU_NUCLEUS_IDX; 1891 } 1892 break; 1893 case ASI_AIUP: /* As if user primary */ 1894 case ASI_AIUPL: /* As if user primary LE */ 1895 case ASI_TWINX_AIUP: 1896 case ASI_TWINX_AIUP_L: 1897 case ASI_BLK_AIUP_4V: 1898 case ASI_BLK_AIUP_L_4V: 1899 case ASI_BLK_AIUP: 1900 case ASI_BLK_AIUPL: 1901 mem_idx = MMU_USER_IDX; 1902 break; 1903 case ASI_AIUS: /* As if user secondary */ 1904 case ASI_AIUSL: /* As if user secondary LE */ 1905 case ASI_TWINX_AIUS: 1906 case ASI_TWINX_AIUS_L: 1907 case ASI_BLK_AIUS_4V: 1908 case ASI_BLK_AIUS_L_4V: 1909 case ASI_BLK_AIUS: 1910 case ASI_BLK_AIUSL: 1911 mem_idx = MMU_USER_SECONDARY_IDX; 1912 break; 1913 case ASI_S: /* Secondary */ 1914 case ASI_SL: /* Secondary LE */ 1915 case ASI_TWINX_S: 1916 case ASI_TWINX_SL: 1917 case ASI_BLK_COMMIT_S: 1918 case ASI_BLK_S: 1919 case ASI_BLK_SL: 1920 case ASI_FL8_S: 1921 case ASI_FL8_SL: 1922 case ASI_FL16_S: 1923 case ASI_FL16_SL: 1924 if (mem_idx == MMU_USER_IDX) { 1925 mem_idx = MMU_USER_SECONDARY_IDX; 1926 } else if (mem_idx == MMU_KERNEL_IDX) { 1927 mem_idx = MMU_KERNEL_SECONDARY_IDX; 1928 } 1929 break; 1930 case ASI_P: /* Primary */ 1931 case ASI_PL: /* Primary LE */ 1932 case ASI_TWINX_P: 1933 case ASI_TWINX_PL: 1934 case ASI_BLK_COMMIT_P: 1935 case ASI_BLK_P: 1936 case ASI_BLK_PL: 1937 case ASI_FL8_P: 1938 case ASI_FL8_PL: 1939 case ASI_FL16_P: 1940 case ASI_FL16_PL: 1941 break; 1942 } 1943 switch (asi) { 1944 case ASI_REAL: 1945 case ASI_REAL_IO: 1946 case ASI_REAL_L: 1947 case ASI_REAL_IO_L: 1948 case ASI_N: 1949 case ASI_NL: 1950 case ASI_AIUP: 1951 case ASI_AIUPL: 1952 case ASI_AIUS: 1953 case ASI_AIUSL: 1954 case ASI_S: 1955 case ASI_SL: 1956 case ASI_P: 1957 case ASI_PL: 1958 type = GET_ASI_DIRECT; 1959 break; 1960 case ASI_TWINX_REAL: 1961 case ASI_TWINX_REAL_L: 1962 case ASI_TWINX_N: 1963 case ASI_TWINX_NL: 1964 case ASI_TWINX_AIUP: 1965 case ASI_TWINX_AIUP_L: 1966 case ASI_TWINX_AIUS: 1967 case ASI_TWINX_AIUS_L: 1968 case ASI_TWINX_P: 1969 case ASI_TWINX_PL: 1970 case ASI_TWINX_S: 1971 case ASI_TWINX_SL: 1972 case ASI_QUAD_LDD_PHYS: 1973 case ASI_QUAD_LDD_PHYS_L: 1974 case ASI_NUCLEUS_QUAD_LDD: 1975 case ASI_NUCLEUS_QUAD_LDD_L: 1976 type = GET_ASI_DTWINX; 1977 break; 1978 case ASI_BLK_COMMIT_P: 1979 case ASI_BLK_COMMIT_S: 1980 case ASI_BLK_AIUP_4V: 1981 case ASI_BLK_AIUP_L_4V: 1982 case ASI_BLK_AIUP: 1983 case ASI_BLK_AIUPL: 1984 case ASI_BLK_AIUS_4V: 1985 case ASI_BLK_AIUS_L_4V: 1986 case ASI_BLK_AIUS: 1987 case ASI_BLK_AIUSL: 1988 case ASI_BLK_S: 1989 case ASI_BLK_SL: 1990 case ASI_BLK_P: 1991 case ASI_BLK_PL: 1992 type = GET_ASI_BLOCK; 1993 break; 1994 case ASI_FL8_S: 1995 case ASI_FL8_SL: 1996 case ASI_FL8_P: 1997 case ASI_FL8_PL: 1998 memop = MO_UB; 1999 type = GET_ASI_SHORT; 2000 break; 2001 case ASI_FL16_S: 2002 case 
ASI_FL16_SL: 2003 case ASI_FL16_P: 2004 case ASI_FL16_PL: 2005 memop = MO_TEUW; 2006 type = GET_ASI_SHORT; 2007 break; 2008 } 2009 /* The little-endian asis all have bit 3 set. */ 2010 if (asi & 8) { 2011 memop ^= MO_BSWAP; 2012 } 2013 } 2014 #endif 2015 2016 return (DisasASI){ type, asi, mem_idx, memop }; 2017 } 2018 2019 static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, 2020 int insn, MemOp memop) 2021 { 2022 DisasASI da = get_asi(dc, insn, memop); 2023 2024 switch (da.type) { 2025 case GET_ASI_EXCP: 2026 break; 2027 case GET_ASI_DTWINX: /* Reserved for ldda. */ 2028 gen_exception(dc, TT_ILL_INSN); 2029 break; 2030 case GET_ASI_DIRECT: 2031 gen_address_mask(dc, addr); 2032 tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN); 2033 break; 2034 default: 2035 { 2036 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2037 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); 2038 2039 save_state(dc); 2040 #ifdef TARGET_SPARC64 2041 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop); 2042 #else 2043 { 2044 TCGv_i64 t64 = tcg_temp_new_i64(); 2045 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop); 2046 tcg_gen_trunc_i64_tl(dst, t64); 2047 } 2048 #endif 2049 } 2050 break; 2051 } 2052 } 2053 2054 static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, 2055 int insn, MemOp memop) 2056 { 2057 DisasASI da = get_asi(dc, insn, memop); 2058 2059 switch (da.type) { 2060 case GET_ASI_EXCP: 2061 break; 2062 case GET_ASI_DTWINX: /* Reserved for stda. */ 2063 #ifndef TARGET_SPARC64 2064 gen_exception(dc, TT_ILL_INSN); 2065 break; 2066 #else 2067 if (!(dc->def->features & CPU_FEATURE_HYPV)) { 2068 /* Pre OpenSPARC CPUs don't have these */ 2069 gen_exception(dc, TT_ILL_INSN); 2070 return; 2071 } 2072 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions 2073 * are ST_BLKINIT_ ASIs */ 2074 #endif 2075 /* fall through */ 2076 case GET_ASI_DIRECT: 2077 gen_address_mask(dc, addr); 2078 tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN); 2079 break; 2080 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) 2081 case GET_ASI_BCOPY: 2082 /* Copy 32 bytes from the address in SRC to ADDR. */ 2083 /* ??? The original qemu code suggests 4-byte alignment, dropping 2084 the low bits, but the only place I can see this used is in the 2085 Linux kernel with 32 byte alignment, which would make more sense 2086 as a cacheline-style operation. */ 2087 { 2088 TCGv saddr = tcg_temp_new(); 2089 TCGv daddr = tcg_temp_new(); 2090 TCGv four = tcg_constant_tl(4); 2091 TCGv_i32 tmp = tcg_temp_new_i32(); 2092 int i; 2093 2094 tcg_gen_andi_tl(saddr, src, -4); 2095 tcg_gen_andi_tl(daddr, addr, -4); 2096 for (i = 0; i < 32; i += 4) { 2097 /* Since the loads and stores are paired, allow the 2098 copy to happen in the host endianness. */ 2099 tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL); 2100 tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL); 2101 tcg_gen_add_tl(saddr, saddr, four); 2102 tcg_gen_add_tl(daddr, daddr, four); 2103 } 2104 } 2105 break; 2106 #endif 2107 default: 2108 { 2109 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2110 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN); 2111 2112 save_state(dc); 2113 #ifdef TARGET_SPARC64 2114 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop); 2115 #else 2116 { 2117 TCGv_i64 t64 = tcg_temp_new_i64(); 2118 tcg_gen_extu_tl_i64(t64, src); 2119 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop); 2120 } 2121 #endif 2122 2123 /* A write to a TLB register may alter page maps. End the TB. 
*/ 2124 dc->npc = DYNAMIC_PC; 2125 } 2126 break; 2127 } 2128 } 2129 2130 static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, 2131 TCGv addr, int insn) 2132 { 2133 DisasASI da = get_asi(dc, insn, MO_TEUL); 2134 2135 switch (da.type) { 2136 case GET_ASI_EXCP: 2137 break; 2138 case GET_ASI_DIRECT: 2139 gen_swap(dc, dst, src, addr, da.mem_idx, da.memop); 2140 break; 2141 default: 2142 /* ??? Should be DAE_invalid_asi. */ 2143 gen_exception(dc, TT_DATA_ACCESS); 2144 break; 2145 } 2146 } 2147 2148 static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, 2149 int insn, int rd) 2150 { 2151 DisasASI da = get_asi(dc, insn, MO_TEUL); 2152 TCGv oldv; 2153 2154 switch (da.type) { 2155 case GET_ASI_EXCP: 2156 return; 2157 case GET_ASI_DIRECT: 2158 oldv = tcg_temp_new(); 2159 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), 2160 da.mem_idx, da.memop | MO_ALIGN); 2161 gen_store_gpr(dc, rd, oldv); 2162 break; 2163 default: 2164 /* ??? Should be DAE_invalid_asi. */ 2165 gen_exception(dc, TT_DATA_ACCESS); 2166 break; 2167 } 2168 } 2169 2170 static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) 2171 { 2172 DisasASI da = get_asi(dc, insn, MO_UB); 2173 2174 switch (da.type) { 2175 case GET_ASI_EXCP: 2176 break; 2177 case GET_ASI_DIRECT: 2178 gen_ldstub(dc, dst, addr, da.mem_idx); 2179 break; 2180 default: 2181 /* ??? In theory, this should be raise DAE_invalid_asi. 2182 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */ 2183 if (tb_cflags(dc->base.tb) & CF_PARALLEL) { 2184 gen_helper_exit_atomic(tcg_env); 2185 } else { 2186 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2187 TCGv_i32 r_mop = tcg_constant_i32(MO_UB); 2188 TCGv_i64 s64, t64; 2189 2190 save_state(dc); 2191 t64 = tcg_temp_new_i64(); 2192 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop); 2193 2194 s64 = tcg_constant_i64(0xff); 2195 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop); 2196 2197 tcg_gen_trunc_i64_tl(dst, t64); 2198 2199 /* End the TB. */ 2200 dc->npc = DYNAMIC_PC; 2201 } 2202 break; 2203 } 2204 } 2205 #endif 2206 2207 #ifdef TARGET_SPARC64 2208 static void gen_ldf_asi(DisasContext *dc, TCGv addr, 2209 int insn, int size, int rd) 2210 { 2211 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ)); 2212 TCGv_i32 d32; 2213 TCGv_i64 d64; 2214 2215 switch (da.type) { 2216 case GET_ASI_EXCP: 2217 break; 2218 2219 case GET_ASI_DIRECT: 2220 gen_address_mask(dc, addr); 2221 switch (size) { 2222 case 4: 2223 d32 = gen_dest_fpr_F(dc); 2224 tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); 2225 gen_store_fpr_F(dc, rd, d32); 2226 break; 2227 case 8: 2228 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, 2229 da.memop | MO_ALIGN_4); 2230 break; 2231 case 16: 2232 d64 = tcg_temp_new_i64(); 2233 tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4); 2234 tcg_gen_addi_tl(addr, addr, 8); 2235 tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, 2236 da.memop | MO_ALIGN_4); 2237 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); 2238 break; 2239 default: 2240 g_assert_not_reached(); 2241 } 2242 break; 2243 2244 case GET_ASI_BLOCK: 2245 /* Valid for lddfa on aligned registers only. */ 2246 if (size == 8 && (rd & 7) == 0) { 2247 MemOp memop; 2248 TCGv eight; 2249 int i; 2250 2251 gen_address_mask(dc, addr); 2252 2253 /* The first operation checks required alignment. 
*/ 2254 memop = da.memop | MO_ALIGN_64; 2255 eight = tcg_constant_tl(8); 2256 for (i = 0; ; ++i) { 2257 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, 2258 da.mem_idx, memop); 2259 if (i == 7) { 2260 break; 2261 } 2262 tcg_gen_add_tl(addr, addr, eight); 2263 memop = da.memop; 2264 } 2265 } else { 2266 gen_exception(dc, TT_ILL_INSN); 2267 } 2268 break; 2269 2270 case GET_ASI_SHORT: 2271 /* Valid for lddfa only. */ 2272 if (size == 8) { 2273 gen_address_mask(dc, addr); 2274 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, 2275 da.memop | MO_ALIGN); 2276 } else { 2277 gen_exception(dc, TT_ILL_INSN); 2278 } 2279 break; 2280 2281 default: 2282 { 2283 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2284 TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN); 2285 2286 save_state(dc); 2287 /* According to the table in the UA2011 manual, the only 2288 other asis that are valid for ldfa/lddfa/ldqfa are 2289 the NO_FAULT asis. We still need a helper for these, 2290 but we can just use the integer asi helper for them. */ 2291 switch (size) { 2292 case 4: 2293 d64 = tcg_temp_new_i64(); 2294 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop); 2295 d32 = gen_dest_fpr_F(dc); 2296 tcg_gen_extrl_i64_i32(d32, d64); 2297 gen_store_fpr_F(dc, rd, d32); 2298 break; 2299 case 8: 2300 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop); 2301 break; 2302 case 16: 2303 d64 = tcg_temp_new_i64(); 2304 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop); 2305 tcg_gen_addi_tl(addr, addr, 8); 2306 gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop); 2307 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64); 2308 break; 2309 default: 2310 g_assert_not_reached(); 2311 } 2312 } 2313 break; 2314 } 2315 } 2316 2317 static void gen_stf_asi(DisasContext *dc, TCGv addr, 2318 int insn, int size, int rd) 2319 { 2320 DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ)); 2321 TCGv_i32 d32; 2322 2323 switch (da.type) { 2324 case GET_ASI_EXCP: 2325 break; 2326 2327 case GET_ASI_DIRECT: 2328 gen_address_mask(dc, addr); 2329 switch (size) { 2330 case 4: 2331 d32 = gen_load_fpr_F(dc, rd); 2332 tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN); 2333 break; 2334 case 8: 2335 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, 2336 da.memop | MO_ALIGN_4); 2337 break; 2338 case 16: 2339 /* Only 4-byte alignment required. However, it is legal for the 2340 cpu to signal the alignment fault, and the OS trap handler is 2341 required to fix it up. Requiring 16-byte alignment here avoids 2342 having to probe the second page before performing the first 2343 write. */ 2344 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, 2345 da.memop | MO_ALIGN_16); 2346 tcg_gen_addi_tl(addr, addr, 8); 2347 tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop); 2348 break; 2349 default: 2350 g_assert_not_reached(); 2351 } 2352 break; 2353 2354 case GET_ASI_BLOCK: 2355 /* Valid for stdfa on aligned registers only. */ 2356 if (size == 8 && (rd & 7) == 0) { 2357 MemOp memop; 2358 TCGv eight; 2359 int i; 2360 2361 gen_address_mask(dc, addr); 2362 2363 /* The first operation checks required alignment. */ 2364 memop = da.memop | MO_ALIGN_64; 2365 eight = tcg_constant_tl(8); 2366 for (i = 0; ; ++i) { 2367 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, 2368 da.mem_idx, memop); 2369 if (i == 7) { 2370 break; 2371 } 2372 tcg_gen_add_tl(addr, addr, eight); 2373 memop = da.memop; 2374 } 2375 } else { 2376 gen_exception(dc, TT_ILL_INSN); 2377 } 2378 break; 2379 2380 case GET_ASI_SHORT: 2381 /* Valid for stdfa only. 
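The FL8/FL16 short-float ASIs are architecturally defined only for the double-register form, hence the size check below.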
*/ 2382 if (size == 8) { 2383 gen_address_mask(dc, addr); 2384 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, 2385 da.memop | MO_ALIGN); 2386 } else { 2387 gen_exception(dc, TT_ILL_INSN); 2388 } 2389 break; 2390 2391 default: 2392 /* According to the table in the UA2011 manual, the only 2393 other asis that are valid for stfa/stdfa/stqfa are 2394 the PST* asis, which aren't currently handled. */ 2395 gen_exception(dc, TT_ILL_INSN); 2396 break; 2397 } 2398 } 2399 2400 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) 2401 { 2402 DisasASI da = get_asi(dc, insn, MO_TEUQ); 2403 TCGv_i64 hi = gen_dest_gpr(dc, rd); 2404 TCGv_i64 lo = gen_dest_gpr(dc, rd + 1); 2405 2406 switch (da.type) { 2407 case GET_ASI_EXCP: 2408 return; 2409 2410 case GET_ASI_DTWINX: 2411 gen_address_mask(dc, addr); 2412 tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); 2413 tcg_gen_addi_tl(addr, addr, 8); 2414 tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop); 2415 break; 2416 2417 case GET_ASI_DIRECT: 2418 { 2419 TCGv_i64 tmp = tcg_temp_new_i64(); 2420 2421 gen_address_mask(dc, addr); 2422 tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN); 2423 2424 /* Note that LE ldda acts as if each 32-bit register 2425 result is byte swapped. Having just performed one 2426 64-bit bswap, we need now to swap the writebacks. */ 2427 if ((da.memop & MO_BSWAP) == MO_TE) { 2428 tcg_gen_extr32_i64(lo, hi, tmp); 2429 } else { 2430 tcg_gen_extr32_i64(hi, lo, tmp); 2431 } 2432 } 2433 break; 2434 2435 default: 2436 /* ??? In theory we've handled all of the ASIs that are valid 2437 for ldda, and this should raise DAE_invalid_asi. However, 2438 real hardware allows others. This can be seen with e.g. 2439 FreeBSD 10.3 wrt ASI_IC_TAG. */ 2440 { 2441 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2442 TCGv_i32 r_mop = tcg_constant_i32(da.memop); 2443 TCGv_i64 tmp = tcg_temp_new_i64(); 2444 2445 save_state(dc); 2446 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop); 2447 2448 /* See above. */ 2449 if ((da.memop & MO_BSWAP) == MO_TE) { 2450 tcg_gen_extr32_i64(lo, hi, tmp); 2451 } else { 2452 tcg_gen_extr32_i64(hi, lo, tmp); 2453 } 2454 } 2455 break; 2456 } 2457 2458 gen_store_gpr(dc, rd, hi); 2459 gen_store_gpr(dc, rd + 1, lo); 2460 } 2461 2462 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, 2463 int insn, int rd) 2464 { 2465 DisasASI da = get_asi(dc, insn, MO_TEUQ); 2466 TCGv lo = gen_load_gpr(dc, rd + 1); 2467 2468 switch (da.type) { 2469 case GET_ASI_EXCP: 2470 break; 2471 2472 case GET_ASI_DTWINX: 2473 gen_address_mask(dc, addr); 2474 tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16); 2475 tcg_gen_addi_tl(addr, addr, 8); 2476 tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop); 2477 break; 2478 2479 case GET_ASI_DIRECT: 2480 { 2481 TCGv_i64 t64 = tcg_temp_new_i64(); 2482 2483 /* Note that LE stda acts as if each 32-bit register result is 2484 byte swapped. We will perform one 64-bit LE store, so now 2485 we must swap the order of the construction. */ 2486 if ((da.memop & MO_BSWAP) == MO_TE) { 2487 tcg_gen_concat32_i64(t64, lo, hi); 2488 } else { 2489 tcg_gen_concat32_i64(t64, hi, lo); 2490 } 2491 gen_address_mask(dc, addr); 2492 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); 2493 } 2494 break; 2495 2496 default: 2497 /* ??? In theory we've handled all of the ASIs that are valid 2498 for stda, and this should raise DAE_invalid_asi. 
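As with ldda above, be permissive and hand the access to the helper rather than faulting.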
*/ 2499 { 2500 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2501 TCGv_i32 r_mop = tcg_constant_i32(da.memop); 2502 TCGv_i64 t64 = tcg_temp_new_i64(); 2503 2504 /* See above. */ 2505 if ((da.memop & MO_BSWAP) == MO_TE) { 2506 tcg_gen_concat32_i64(t64, lo, hi); 2507 } else { 2508 tcg_gen_concat32_i64(t64, hi, lo); 2509 } 2510 2511 save_state(dc); 2512 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop); 2513 } 2514 break; 2515 } 2516 } 2517 2518 static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, 2519 int insn, int rd) 2520 { 2521 DisasASI da = get_asi(dc, insn, MO_TEUQ); 2522 TCGv oldv; 2523 2524 switch (da.type) { 2525 case GET_ASI_EXCP: 2526 return; 2527 case GET_ASI_DIRECT: 2528 oldv = tcg_temp_new(); 2529 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd), 2530 da.mem_idx, da.memop | MO_ALIGN); 2531 gen_store_gpr(dc, rd, oldv); 2532 break; 2533 default: 2534 /* ??? Should be DAE_invalid_asi. */ 2535 gen_exception(dc, TT_DATA_ACCESS); 2536 break; 2537 } 2538 } 2539 2540 #elif !defined(CONFIG_USER_ONLY) 2541 static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd) 2542 { 2543 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12, 2544 whereby "rd + 1" elicits "error: array subscript is above array". 2545 Since we have already asserted that rd is even, the semantics 2546 are unchanged. */ 2547 TCGv lo = gen_dest_gpr(dc, rd | 1); 2548 TCGv hi = gen_dest_gpr(dc, rd); 2549 TCGv_i64 t64 = tcg_temp_new_i64(); 2550 DisasASI da = get_asi(dc, insn, MO_TEUQ); 2551 2552 switch (da.type) { 2553 case GET_ASI_EXCP: 2554 return; 2555 case GET_ASI_DIRECT: 2556 gen_address_mask(dc, addr); 2557 tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); 2558 break; 2559 default: 2560 { 2561 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2562 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); 2563 2564 save_state(dc); 2565 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop); 2566 } 2567 break; 2568 } 2569 2570 tcg_gen_extr_i64_i32(lo, hi, t64); 2571 gen_store_gpr(dc, rd | 1, lo); 2572 gen_store_gpr(dc, rd, hi); 2573 } 2574 2575 static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, 2576 int insn, int rd) 2577 { 2578 DisasASI da = get_asi(dc, insn, MO_TEUQ); 2579 TCGv lo = gen_load_gpr(dc, rd + 1); 2580 TCGv_i64 t64 = tcg_temp_new_i64(); 2581 2582 tcg_gen_concat_tl_i64(t64, lo, hi); 2583 2584 switch (da.type) { 2585 case GET_ASI_EXCP: 2586 break; 2587 case GET_ASI_DIRECT: 2588 gen_address_mask(dc, addr); 2589 tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN); 2590 break; 2591 case GET_ASI_BFILL: 2592 /* Store 32 bytes of T64 to ADDR. */ 2593 /* ??? The original qemu code suggests 8-byte alignment, dropping 2594 the low bits, but the only place I can see this used is in the 2595 Linux kernel with 32 byte alignment, which would make more sense 2596 as a cacheline-style operation. 
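Either way, the loop below rounds the address down to 8 bytes and emits four 8-byte stores of the same value.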
*/ 2597 { 2598 TCGv d_addr = tcg_temp_new(); 2599 TCGv eight = tcg_constant_tl(8); 2600 int i; 2601 2602 tcg_gen_andi_tl(d_addr, addr, -8); 2603 for (i = 0; i < 32; i += 8) { 2604 tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop); 2605 tcg_gen_add_tl(d_addr, d_addr, eight); 2606 } 2607 } 2608 break; 2609 default: 2610 { 2611 TCGv_i32 r_asi = tcg_constant_i32(da.asi); 2612 TCGv_i32 r_mop = tcg_constant_i32(MO_UQ); 2613 2614 save_state(dc); 2615 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop); 2616 } 2617 break; 2618 } 2619 } 2620 #endif 2621 2622 static TCGv get_src1(DisasContext *dc, unsigned int insn) 2623 { 2624 unsigned int rs1 = GET_FIELD(insn, 13, 17); 2625 return gen_load_gpr(dc, rs1); 2626 } 2627 2628 static TCGv get_src2(DisasContext *dc, unsigned int insn) 2629 { 2630 if (IS_IMM) { /* immediate */ 2631 target_long simm = GET_FIELDs(insn, 19, 31); 2632 TCGv t = tcg_temp_new(); 2633 tcg_gen_movi_tl(t, simm); 2634 return t; 2635 } else { /* register */ 2636 unsigned int rs2 = GET_FIELD(insn, 27, 31); 2637 return gen_load_gpr(dc, rs2); 2638 } 2639 } 2640 2641 #ifdef TARGET_SPARC64 2642 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs) 2643 { 2644 TCGv_i32 c32, zero, dst, s1, s2; 2645 2646 /* We have two choices here: extend the 32 bit data and use movcond_i64, 2647 or fold the comparison down to 32 bits and use movcond_i32. Choose 2648 the later. */ 2649 c32 = tcg_temp_new_i32(); 2650 if (cmp->is_bool) { 2651 tcg_gen_extrl_i64_i32(c32, cmp->c1); 2652 } else { 2653 TCGv_i64 c64 = tcg_temp_new_i64(); 2654 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2); 2655 tcg_gen_extrl_i64_i32(c32, c64); 2656 } 2657 2658 s1 = gen_load_fpr_F(dc, rs); 2659 s2 = gen_load_fpr_F(dc, rd); 2660 dst = gen_dest_fpr_F(dc); 2661 zero = tcg_constant_i32(0); 2662 2663 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2); 2664 2665 gen_store_fpr_F(dc, rd, dst); 2666 } 2667 2668 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs) 2669 { 2670 TCGv_i64 dst = gen_dest_fpr_D(dc, rd); 2671 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2, 2672 gen_load_fpr_D(dc, rs), 2673 gen_load_fpr_D(dc, rd)); 2674 gen_store_fpr_D(dc, rd, dst); 2675 } 2676 2677 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs) 2678 { 2679 int qd = QFPREG(rd); 2680 int qs = QFPREG(rs); 2681 2682 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2, 2683 cpu_fpr[qs / 2], cpu_fpr[qd / 2]); 2684 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2, 2685 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]); 2686 2687 gen_update_fprs_dirty(dc, qd); 2688 } 2689 2690 #ifndef CONFIG_USER_ONLY 2691 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env tcg_env) 2692 { 2693 TCGv_i32 r_tl = tcg_temp_new_i32(); 2694 2695 /* load env->tl into r_tl */ 2696 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl)); 2697 2698 /* tl = [0 ... 
MAXTL_MASK] where MAXTL_MASK must be power of 2 */ 2699 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK); 2700 2701 /* calculate offset to current trap state from env->ts, reuse r_tl */ 2702 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state)); 2703 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts)); 2704 2705 /* tsptr = env->ts[env->tl & MAXTL_MASK] */ 2706 { 2707 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr(); 2708 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl); 2709 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp); 2710 } 2711 } 2712 #endif 2713 2714 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2, 2715 int width, bool cc, bool left) 2716 { 2717 TCGv lo1, lo2; 2718 uint64_t amask, tabl, tabr; 2719 int shift, imask, omask; 2720 2721 if (cc) { 2722 tcg_gen_mov_tl(cpu_cc_src, s1); 2723 tcg_gen_mov_tl(cpu_cc_src2, s2); 2724 tcg_gen_sub_tl(cpu_cc_dst, s1, s2); 2725 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB); 2726 dc->cc_op = CC_OP_SUB; 2727 } 2728 2729 /* Theory of operation: there are two tables, left and right (not to 2730 be confused with the left and right versions of the opcode). These 2731 are indexed by the low 3 bits of the inputs. To make things "easy", 2732 these tables are loaded into two constants, TABL and TABR below. 2733 The operation index = (input & imask) << shift calculates the index 2734 into the constant, while val = (table >> index) & omask calculates 2735 the value we're looking for. */ 2736 switch (width) { 2737 case 8: 2738 imask = 0x7; 2739 shift = 3; 2740 omask = 0xff; 2741 if (left) { 2742 tabl = 0x80c0e0f0f8fcfeffULL; 2743 tabr = 0xff7f3f1f0f070301ULL; 2744 } else { 2745 tabl = 0x0103070f1f3f7fffULL; 2746 tabr = 0xfffefcf8f0e0c080ULL; 2747 } 2748 break; 2749 case 16: 2750 imask = 0x6; 2751 shift = 1; 2752 omask = 0xf; 2753 if (left) { 2754 tabl = 0x8cef; 2755 tabr = 0xf731; 2756 } else { 2757 tabl = 0x137f; 2758 tabr = 0xfec8; 2759 } 2760 break; 2761 case 32: 2762 imask = 0x4; 2763 shift = 0; 2764 omask = 0x3; 2765 if (left) { 2766 tabl = (2 << 2) | 3; 2767 tabr = (3 << 2) | 1; 2768 } else { 2769 tabl = (1 << 2) | 3; 2770 tabr = (3 << 2) | 2; 2771 } 2772 break; 2773 default: 2774 abort(); 2775 } 2776 2777 lo1 = tcg_temp_new(); 2778 lo2 = tcg_temp_new(); 2779 tcg_gen_andi_tl(lo1, s1, imask); 2780 tcg_gen_andi_tl(lo2, s2, imask); 2781 tcg_gen_shli_tl(lo1, lo1, shift); 2782 tcg_gen_shli_tl(lo2, lo2, shift); 2783 2784 tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1); 2785 tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2); 2786 tcg_gen_andi_tl(lo1, lo1, omask); 2787 tcg_gen_andi_tl(lo2, lo2, omask); 2788 2789 amask = -8; 2790 if (AM_CHECK(dc)) { 2791 amask &= 0xffffffffULL; 2792 } 2793 tcg_gen_andi_tl(s1, s1, amask); 2794 tcg_gen_andi_tl(s2, s2, amask); 2795 2796 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */ 2797 tcg_gen_and_tl(lo2, lo2, lo1); 2798 tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2); 2799 } 2800 2801 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left) 2802 { 2803 TCGv tmp = tcg_temp_new(); 2804 2805 tcg_gen_add_tl(tmp, s1, s2); 2806 tcg_gen_andi_tl(dst, tmp, -8); 2807 if (left) { 2808 tcg_gen_neg_tl(tmp, tmp); 2809 } 2810 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3); 2811 } 2812 2813 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2) 2814 { 2815 TCGv t1, t2, shift; 2816 2817 t1 = tcg_temp_new(); 2818 t2 = tcg_temp_new(); 2819 shift = tcg_temp_new(); 2820 2821 tcg_gen_andi_tl(shift, gsr, 7); 2822 tcg_gen_shli_tl(shift, shift, 3); 2823 tcg_gen_shl_tl(t1, s1, shift); 2824 2825 /* A shift of 64 does not produce 0 in TCG. 
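(TCG leaves shifts by amounts >= the operand width undefined, matching typical host shifters.)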
Divide this into a 2826 shift of (up to 63) followed by a constant shift of 1. */ 2827 tcg_gen_xori_tl(shift, shift, 63); 2828 tcg_gen_shr_tl(t2, s2, shift); 2829 tcg_gen_shri_tl(t2, t2, 1); 2830 2831 tcg_gen_or_tl(dst, t1, t2); 2832 } 2833 #endif 2834 2835 /* Include the auto-generated decoder. */ 2836 #include "decode-insns.c.inc" 2837 2838 #define TRANS(NAME, AVAIL, FUNC, ...) \ 2839 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \ 2840 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); } 2841 2842 #define avail_ALL(C) true 2843 #ifdef TARGET_SPARC64 2844 # define avail_32(C) false 2845 # define avail_64(C) true 2846 #else 2847 # define avail_32(C) true 2848 # define avail_64(C) false 2849 #endif 2850 2851 /* Default case for non jump instructions. */ 2852 static bool advance_pc(DisasContext *dc) 2853 { 2854 if (dc->npc & 3) { 2855 switch (dc->npc) { 2856 case DYNAMIC_PC: 2857 case DYNAMIC_PC_LOOKUP: 2858 dc->pc = dc->npc; 2859 gen_op_next_insn(); 2860 break; 2861 case JUMP_PC: 2862 /* we can do a static jump */ 2863 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond); 2864 dc->base.is_jmp = DISAS_NORETURN; 2865 break; 2866 default: 2867 g_assert_not_reached(); 2868 } 2869 } else { 2870 dc->pc = dc->npc; 2871 dc->npc = dc->npc + 4; 2872 } 2873 return true; 2874 } 2875 2876 static bool advance_jump_uncond_never(DisasContext *dc, bool annul) 2877 { 2878 if (annul) { 2879 dc->pc = dc->npc + 4; 2880 dc->npc = dc->pc + 4; 2881 } else { 2882 dc->pc = dc->npc; 2883 dc->npc = dc->pc + 4; 2884 } 2885 return true; 2886 } 2887 2888 static bool advance_jump_uncond_always(DisasContext *dc, bool annul, 2889 target_ulong dest) 2890 { 2891 if (annul) { 2892 dc->pc = dest; 2893 dc->npc = dest + 4; 2894 } else { 2895 dc->pc = dc->npc; 2896 dc->npc = dest; 2897 tcg_gen_mov_tl(cpu_pc, cpu_npc); 2898 } 2899 return true; 2900 } 2901 2902 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp, 2903 bool annul, target_ulong dest) 2904 { 2905 target_ulong npc = dc->npc; 2906 2907 if (annul) { 2908 TCGLabel *l1 = gen_new_label(); 2909 2910 tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1); 2911 gen_goto_tb(dc, 0, npc, dest); 2912 gen_set_label(l1); 2913 gen_goto_tb(dc, 1, npc + 4, npc + 8); 2914 2915 dc->base.is_jmp = DISAS_NORETURN; 2916 } else { 2917 if (npc & 3) { 2918 switch (npc) { 2919 case DYNAMIC_PC: 2920 case DYNAMIC_PC_LOOKUP: 2921 tcg_gen_mov_tl(cpu_pc, cpu_npc); 2922 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4); 2923 tcg_gen_movcond_tl(cmp->cond, cpu_npc, 2924 cmp->c1, cmp->c2, 2925 tcg_constant_tl(dest), cpu_npc); 2926 dc->pc = npc; 2927 break; 2928 default: 2929 g_assert_not_reached(); 2930 } 2931 } else { 2932 dc->pc = npc; 2933 dc->jump_pc[0] = dest; 2934 dc->jump_pc[1] = npc + 4; 2935 dc->npc = JUMP_PC; 2936 if (cmp->is_bool) { 2937 tcg_gen_mov_tl(cpu_cond, cmp->c1); 2938 } else { 2939 tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2); 2940 } 2941 } 2942 } 2943 return true; 2944 } 2945 2946 static bool do_bpcc(DisasContext *dc, arg_bcc *a) 2947 { 2948 target_long target = address_mask_i(dc, dc->pc + a->i * 4); 2949 DisasCompare cmp; 2950 2951 switch (a->cond) { 2952 case 0x0: 2953 return advance_jump_uncond_never(dc, a->a); 2954 case 0x8: 2955 return advance_jump_uncond_always(dc, a->a, target); 2956 default: 2957 flush_cond(dc); 2958 2959 gen_compare(&cmp, a->cc, a->cond, dc); 2960 return advance_jump_cond(dc, &cmp, a->a, target); 2961 } 2962 } 2963 2964 TRANS(Bicc, ALL, do_bpcc, a) 2965 TRANS(BPcc, 64, do_bpcc, a) 2966 2967 static bool 
do_fbpfcc(DisasContext *dc, arg_bcc *a) 2968 { 2969 target_long target = address_mask_i(dc, dc->pc + a->i * 4); 2970 DisasCompare cmp; 2971 2972 if (gen_trap_ifnofpu(dc)) { 2973 return true; 2974 } 2975 switch (a->cond) { 2976 case 0x0: 2977 return advance_jump_uncond_never(dc, a->a); 2978 case 0x8: 2979 return advance_jump_uncond_always(dc, a->a, target); 2980 default: 2981 flush_cond(dc); 2982 2983 gen_fcompare(&cmp, a->cc, a->cond); 2984 return advance_jump_cond(dc, &cmp, a->a, target); 2985 } 2986 } 2987 2988 TRANS(FBPfcc, 64, do_fbpfcc, a) 2989 TRANS(FBfcc, ALL, do_fbpfcc, a) 2990 2991 static bool trans_BPr(DisasContext *dc, arg_BPr *a) 2992 { 2993 target_long target = address_mask_i(dc, dc->pc + a->i * 4); 2994 DisasCompare cmp; 2995 2996 if (!avail_64(dc)) { 2997 return false; 2998 } 2999 if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) { 3000 return false; 3001 } 3002 3003 flush_cond(dc); 3004 gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1)); 3005 return advance_jump_cond(dc, &cmp, a->a, target); 3006 } 3007 3008 static bool trans_CALL(DisasContext *dc, arg_CALL *a) 3009 { 3010 target_long target = address_mask_i(dc, dc->pc + a->i * 4); 3011 3012 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc)); 3013 gen_mov_pc_npc(dc); 3014 dc->npc = target; 3015 return true; 3016 } 3017 3018 static bool trans_NCP(DisasContext *dc, arg_NCP *a) 3019 { 3020 /* 3021 * For sparc32, always generate the no-coprocessor exception. 3022 * For sparc64, always generate illegal instruction. 3023 */ 3024 #ifdef TARGET_SPARC64 3025 return false; 3026 #else 3027 gen_exception(dc, TT_NCP_INSN); 3028 return true; 3029 #endif 3030 } 3031 3032 #define CHECK_IU_FEATURE(dc, FEATURE) \ 3033 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ 3034 goto illegal_insn; 3035 #define CHECK_FPU_FEATURE(dc, FEATURE) \ 3036 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \ 3037 goto nfpu_insn; 3038 3039 /* before an instruction, dc->pc must be static */ 3040 static void disas_sparc_legacy(DisasContext *dc, unsigned int insn) 3041 { 3042 unsigned int opc, rs1, rs2, rd; 3043 TCGv cpu_src1, cpu_src2; 3044 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32; 3045 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64; 3046 target_long simm; 3047 3048 opc = GET_FIELD(insn, 0, 1); 3049 rd = GET_FIELD(insn, 2, 6); 3050 3051 switch (opc) { 3052 case 0: /* branches/sethi */ 3053 { 3054 unsigned int xop = GET_FIELD(insn, 7, 9); 3055 switch (xop) { 3056 #ifdef TARGET_SPARC64 3057 case 0x1: /* V9 BPcc */ 3058 g_assert_not_reached(); /* in decodetree */ 3059 case 0x3: /* V9 BPr */ 3060 g_assert_not_reached(); /* in decodetree */ 3061 case 0x5: /* V9 FBPcc */ 3062 g_assert_not_reached(); /* in decodetree */ 3063 #else 3064 case 0x7: /* CBN+x */ 3065 g_assert_not_reached(); /* in decodetree */ 3066 #endif 3067 case 0x2: /* BN+x */ 3068 g_assert_not_reached(); /* in decodetree */ 3069 case 0x6: /* FBN+x */ 3070 g_assert_not_reached(); /* in decodetree */ 3071 case 0x4: /* SETHI */ 3072 /* Special-case %g0 because that's the canonical nop. 
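The architected encoding of nop is sethi 0, %g0, so an rd of zero must write nothing.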
*/ 3073 if (rd) { 3074 uint32_t value = GET_FIELD(insn, 10, 31); 3075 TCGv t = gen_dest_gpr(dc, rd); 3076 tcg_gen_movi_tl(t, value << 10); 3077 gen_store_gpr(dc, rd, t); 3078 } 3079 break; 3080 case 0x0: /* UNIMPL */ 3081 default: 3082 goto illegal_insn; 3083 } 3084 break; 3085 } 3086 break; 3087 case 1: 3088 g_assert_not_reached(); /* in decodetree */ 3089 case 2: /* FPU & Logical Operations */ 3090 { 3091 unsigned int xop = GET_FIELD(insn, 7, 12); 3092 TCGv cpu_dst = tcg_temp_new(); 3093 TCGv cpu_tmp0; 3094 3095 if (xop == 0x3a) { /* generate trap */ 3096 int cond = GET_FIELD(insn, 3, 6); 3097 TCGv_i32 trap; 3098 TCGLabel *l1 = NULL; 3099 int mask; 3100 3101 if (cond == 0) { 3102 /* Trap never. */ 3103 break; 3104 } 3105 3106 save_state(dc); 3107 3108 if (cond != 8) { 3109 /* Conditional trap. */ 3110 DisasCompare cmp; 3111 #ifdef TARGET_SPARC64 3112 /* V9 icc/xcc */ 3113 int cc = GET_FIELD_SP(insn, 11, 12); 3114 if (cc == 0) { 3115 gen_compare(&cmp, 0, cond, dc); 3116 } else if (cc == 2) { 3117 gen_compare(&cmp, 1, cond, dc); 3118 } else { 3119 goto illegal_insn; 3120 } 3121 #else 3122 gen_compare(&cmp, 0, cond, dc); 3123 #endif 3124 l1 = gen_new_label(); 3125 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond), 3126 cmp.c1, cmp.c2, l1); 3127 } 3128 3129 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc) 3130 ? UA2005_HTRAP_MASK : V8_TRAP_MASK); 3131 3132 /* Don't use the normal temporaries, as they may well have 3133 gone out of scope with the branch above. While we're 3134 doing that we might as well pre-truncate to 32-bit. */ 3135 trap = tcg_temp_new_i32(); 3136 3137 rs1 = GET_FIELD_SP(insn, 14, 18); 3138 if (IS_IMM) { 3139 rs2 = GET_FIELD_SP(insn, 0, 7); 3140 if (rs1 == 0) { 3141 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP); 3142 /* Signal that the trap value is fully constant. */ 3143 mask = 0; 3144 } else { 3145 TCGv t1 = gen_load_gpr(dc, rs1); 3146 tcg_gen_trunc_tl_i32(trap, t1); 3147 tcg_gen_addi_i32(trap, trap, rs2); 3148 } 3149 } else { 3150 TCGv t1, t2; 3151 rs2 = GET_FIELD_SP(insn, 0, 4); 3152 t1 = gen_load_gpr(dc, rs1); 3153 t2 = gen_load_gpr(dc, rs2); 3154 tcg_gen_add_tl(t1, t1, t2); 3155 tcg_gen_trunc_tl_i32(trap, t1); 3156 } 3157 if (mask != 0) { 3158 tcg_gen_andi_i32(trap, trap, mask); 3159 tcg_gen_addi_i32(trap, trap, TT_TRAP); 3160 } 3161 3162 gen_helper_raise_exception(tcg_env, trap); 3163 3164 if (cond == 8) { 3165 /* An unconditional trap ends the TB. */ 3166 dc->base.is_jmp = DISAS_NORETURN; 3167 goto jmp_insn; 3168 } else { 3169 /* A conditional trap falls through to the next insn. */ 3170 gen_set_label(l1); 3171 break; 3172 } 3173 } else if (xop == 0x28) { 3174 rs1 = GET_FIELD(insn, 13, 17); 3175 switch(rs1) { 3176 case 0: /* rdy */ 3177 #ifndef TARGET_SPARC64 3178 case 0x01 ... 0x0e: /* undefined in the SPARCv8 3179 manual, rdy on the microSPARC 3180 II */ 3181 case 0x0f: /* stbar in the SPARCv8 manual, 3182 rdy on the microSPARC II */ 3183 case 0x10 ... 
0x1f: /* implementation-dependent in the 3184 SPARCv8 manual, rdy on the 3185 microSPARC II */ 3186 /* Read Asr17 */ 3187 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) { 3188 TCGv t = gen_dest_gpr(dc, rd); 3189 /* Read Asr17 for a Leon3 monoprocessor */ 3190 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1)); 3191 gen_store_gpr(dc, rd, t); 3192 break; 3193 } 3194 #endif 3195 gen_store_gpr(dc, rd, cpu_y); 3196 break; 3197 #ifdef TARGET_SPARC64 3198 case 0x2: /* V9 rdccr */ 3199 update_psr(dc); 3200 gen_helper_rdccr(cpu_dst, tcg_env); 3201 gen_store_gpr(dc, rd, cpu_dst); 3202 break; 3203 case 0x3: /* V9 rdasi */ 3204 tcg_gen_movi_tl(cpu_dst, dc->asi); 3205 gen_store_gpr(dc, rd, cpu_dst); 3206 break; 3207 case 0x4: /* V9 rdtick */ 3208 { 3209 TCGv_ptr r_tickptr; 3210 TCGv_i32 r_const; 3211 3212 r_tickptr = tcg_temp_new_ptr(); 3213 r_const = tcg_constant_i32(dc->mem_idx); 3214 tcg_gen_ld_ptr(r_tickptr, tcg_env, 3215 offsetof(CPUSPARCState, tick)); 3216 if (translator_io_start(&dc->base)) { 3217 dc->base.is_jmp = DISAS_EXIT; 3218 } 3219 gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr, 3220 r_const); 3221 gen_store_gpr(dc, rd, cpu_dst); 3222 } 3223 break; 3224 case 0x5: /* V9 rdpc */ 3225 { 3226 TCGv t = gen_dest_gpr(dc, rd); 3227 if (unlikely(AM_CHECK(dc))) { 3228 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL); 3229 } else { 3230 tcg_gen_movi_tl(t, dc->pc); 3231 } 3232 gen_store_gpr(dc, rd, t); 3233 } 3234 break; 3235 case 0x6: /* V9 rdfprs */ 3236 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs); 3237 gen_store_gpr(dc, rd, cpu_dst); 3238 break; 3239 case 0xf: /* V9 membar */ 3240 break; /* no effect */ 3241 case 0x13: /* Graphics Status */ 3242 if (gen_trap_ifnofpu(dc)) { 3243 goto jmp_insn; 3244 } 3245 gen_store_gpr(dc, rd, cpu_gsr); 3246 break; 3247 case 0x16: /* Softint */ 3248 tcg_gen_ld32s_tl(cpu_dst, tcg_env, 3249 offsetof(CPUSPARCState, softint)); 3250 gen_store_gpr(dc, rd, cpu_dst); 3251 break; 3252 case 0x17: /* Tick compare */ 3253 gen_store_gpr(dc, rd, cpu_tick_cmpr); 3254 break; 3255 case 0x18: /* System tick */ 3256 { 3257 TCGv_ptr r_tickptr; 3258 TCGv_i32 r_const; 3259 3260 r_tickptr = tcg_temp_new_ptr(); 3261 r_const = tcg_constant_i32(dc->mem_idx); 3262 tcg_gen_ld_ptr(r_tickptr, tcg_env, 3263 offsetof(CPUSPARCState, stick)); 3264 if (translator_io_start(&dc->base)) { 3265 dc->base.is_jmp = DISAS_EXIT; 3266 } 3267 gen_helper_tick_get_count(cpu_dst, tcg_env, r_tickptr, 3268 r_const); 3269 gen_store_gpr(dc, rd, cpu_dst); 3270 } 3271 break; 3272 case 0x19: /* System tick compare */ 3273 gen_store_gpr(dc, rd, cpu_stick_cmpr); 3274 break; 3275 case 0x1a: /* UltraSPARC-T1 Strand status */ 3276 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe 3277 * this ASR as impl. 
dep 3278 */ 3279 CHECK_IU_FEATURE(dc, HYPV); 3280 { 3281 TCGv t = gen_dest_gpr(dc, rd); 3282 tcg_gen_movi_tl(t, 1UL); 3283 gen_store_gpr(dc, rd, t); 3284 } 3285 break; 3286 case 0x10: /* Performance Control */ 3287 case 0x11: /* Performance Instrumentation Counter */ 3288 case 0x12: /* Dispatch Control */ 3289 case 0x14: /* Softint set, WO */ 3290 case 0x15: /* Softint clear, WO */ 3291 #endif 3292 default: 3293 goto illegal_insn; 3294 } 3295 #if !defined(CONFIG_USER_ONLY) 3296 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */ 3297 #ifndef TARGET_SPARC64 3298 if (!supervisor(dc)) { 3299 goto priv_insn; 3300 } 3301 update_psr(dc); 3302 gen_helper_rdpsr(cpu_dst, tcg_env); 3303 #else 3304 CHECK_IU_FEATURE(dc, HYPV); 3305 if (!hypervisor(dc)) 3306 goto priv_insn; 3307 rs1 = GET_FIELD(insn, 13, 17); 3308 switch (rs1) { 3309 case 0: // hpstate 3310 tcg_gen_ld_i64(cpu_dst, tcg_env, 3311 offsetof(CPUSPARCState, hpstate)); 3312 break; 3313 case 1: // htstate 3314 // gen_op_rdhtstate(); 3315 break; 3316 case 3: // hintp 3317 tcg_gen_mov_tl(cpu_dst, cpu_hintp); 3318 break; 3319 case 5: // htba 3320 tcg_gen_mov_tl(cpu_dst, cpu_htba); 3321 break; 3322 case 6: // hver 3323 tcg_gen_mov_tl(cpu_dst, cpu_hver); 3324 break; 3325 case 31: // hstick_cmpr 3326 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr); 3327 break; 3328 default: 3329 goto illegal_insn; 3330 } 3331 #endif 3332 gen_store_gpr(dc, rd, cpu_dst); 3333 break; 3334 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */ 3335 if (!supervisor(dc)) { 3336 goto priv_insn; 3337 } 3338 cpu_tmp0 = tcg_temp_new(); 3339 #ifdef TARGET_SPARC64 3340 rs1 = GET_FIELD(insn, 13, 17); 3341 switch (rs1) { 3342 case 0: // tpc 3343 { 3344 TCGv_ptr r_tsptr; 3345 3346 r_tsptr = tcg_temp_new_ptr(); 3347 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 3348 tcg_gen_ld_tl(cpu_tmp0, r_tsptr, 3349 offsetof(trap_state, tpc)); 3350 } 3351 break; 3352 case 1: // tnpc 3353 { 3354 TCGv_ptr r_tsptr; 3355 3356 r_tsptr = tcg_temp_new_ptr(); 3357 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 3358 tcg_gen_ld_tl(cpu_tmp0, r_tsptr, 3359 offsetof(trap_state, tnpc)); 3360 } 3361 break; 3362 case 2: // tstate 3363 { 3364 TCGv_ptr r_tsptr; 3365 3366 r_tsptr = tcg_temp_new_ptr(); 3367 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 3368 tcg_gen_ld_tl(cpu_tmp0, r_tsptr, 3369 offsetof(trap_state, tstate)); 3370 } 3371 break; 3372 case 3: // tt 3373 { 3374 TCGv_ptr r_tsptr = tcg_temp_new_ptr(); 3375 3376 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 3377 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr, 3378 offsetof(trap_state, tt)); 3379 } 3380 break; 3381 case 4: // tick 3382 { 3383 TCGv_ptr r_tickptr; 3384 TCGv_i32 r_const; 3385 3386 r_tickptr = tcg_temp_new_ptr(); 3387 r_const = tcg_constant_i32(dc->mem_idx); 3388 tcg_gen_ld_ptr(r_tickptr, tcg_env, 3389 offsetof(CPUSPARCState, tick)); 3390 if (translator_io_start(&dc->base)) { 3391 dc->base.is_jmp = DISAS_EXIT; 3392 } 3393 gen_helper_tick_get_count(cpu_tmp0, tcg_env, 3394 r_tickptr, r_const); 3395 } 3396 break; 3397 case 5: // tba 3398 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr); 3399 break; 3400 case 6: // pstate 3401 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3402 offsetof(CPUSPARCState, pstate)); 3403 break; 3404 case 7: // tl 3405 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3406 offsetof(CPUSPARCState, tl)); 3407 break; 3408 case 8: // pil 3409 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3410 offsetof(CPUSPARCState, psrpil)); 3411 break; 3412 case 9: // cwp 3413 gen_helper_rdcwp(cpu_tmp0, tcg_env); 3414 break; 3415 case 10: // cansave 3416 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3417 
offsetof(CPUSPARCState, cansave)); 3418 break; 3419 case 11: // canrestore 3420 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3421 offsetof(CPUSPARCState, canrestore)); 3422 break; 3423 case 12: // cleanwin 3424 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3425 offsetof(CPUSPARCState, cleanwin)); 3426 break; 3427 case 13: // otherwin 3428 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3429 offsetof(CPUSPARCState, otherwin)); 3430 break; 3431 case 14: // wstate 3432 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3433 offsetof(CPUSPARCState, wstate)); 3434 break; 3435 case 16: // UA2005 gl 3436 CHECK_IU_FEATURE(dc, GL); 3437 tcg_gen_ld32s_tl(cpu_tmp0, tcg_env, 3438 offsetof(CPUSPARCState, gl)); 3439 break; 3440 case 26: // UA2005 strand status 3441 CHECK_IU_FEATURE(dc, HYPV); 3442 if (!hypervisor(dc)) 3443 goto priv_insn; 3444 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr); 3445 break; 3446 case 31: // ver 3447 tcg_gen_mov_tl(cpu_tmp0, cpu_ver); 3448 break; 3449 case 15: // fq 3450 default: 3451 goto illegal_insn; 3452 } 3453 #else 3454 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim); 3455 #endif 3456 gen_store_gpr(dc, rd, cpu_tmp0); 3457 break; 3458 #endif 3459 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY) 3460 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ 3461 #ifdef TARGET_SPARC64 3462 gen_helper_flushw(tcg_env); 3463 #else 3464 if (!supervisor(dc)) 3465 goto priv_insn; 3466 gen_store_gpr(dc, rd, cpu_tbr); 3467 #endif 3468 break; 3469 #endif 3470 } else if (xop == 0x34) { /* FPU Operations */ 3471 if (gen_trap_ifnofpu(dc)) { 3472 goto jmp_insn; 3473 } 3474 gen_op_clear_ieee_excp_and_FTT(); 3475 rs1 = GET_FIELD(insn, 13, 17); 3476 rs2 = GET_FIELD(insn, 27, 31); 3477 xop = GET_FIELD(insn, 18, 26); 3478 3479 switch (xop) { 3480 case 0x1: /* fmovs */ 3481 cpu_src1_32 = gen_load_fpr_F(dc, rs2); 3482 gen_store_fpr_F(dc, rd, cpu_src1_32); 3483 break; 3484 case 0x5: /* fnegs */ 3485 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs); 3486 break; 3487 case 0x9: /* fabss */ 3488 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss); 3489 break; 3490 case 0x29: /* fsqrts */ 3491 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts); 3492 break; 3493 case 0x2a: /* fsqrtd */ 3494 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd); 3495 break; 3496 case 0x2b: /* fsqrtq */ 3497 CHECK_FPU_FEATURE(dc, FLOAT128); 3498 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq); 3499 break; 3500 case 0x41: /* fadds */ 3501 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds); 3502 break; 3503 case 0x42: /* faddd */ 3504 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd); 3505 break; 3506 case 0x43: /* faddq */ 3507 CHECK_FPU_FEATURE(dc, FLOAT128); 3508 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq); 3509 break; 3510 case 0x45: /* fsubs */ 3511 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs); 3512 break; 3513 case 0x46: /* fsubd */ 3514 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd); 3515 break; 3516 case 0x47: /* fsubq */ 3517 CHECK_FPU_FEATURE(dc, FLOAT128); 3518 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq); 3519 break; 3520 case 0x49: /* fmuls */ 3521 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls); 3522 break; 3523 case 0x4a: /* fmuld */ 3524 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld); 3525 break; 3526 case 0x4b: /* fmulq */ 3527 CHECK_FPU_FEATURE(dc, FLOAT128); 3528 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq); 3529 break; 3530 case 0x4d: /* fdivs */ 3531 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs); 3532 break; 3533 case 0x4e: /* fdivd */ 3534 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd); 3535 break; 3536 case 0x4f: /* fdivq */ 3537 CHECK_FPU_FEATURE(dc, FLOAT128); 3538 gen_fop_QQQ(dc, 
rd, rs1, rs2, gen_helper_fdivq); 3539 break; 3540 case 0x69: /* fsmuld */ 3541 CHECK_FPU_FEATURE(dc, FSMULD); 3542 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld); 3543 break; 3544 case 0x6e: /* fdmulq */ 3545 CHECK_FPU_FEATURE(dc, FLOAT128); 3546 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq); 3547 break; 3548 case 0xc4: /* fitos */ 3549 gen_fop_FF(dc, rd, rs2, gen_helper_fitos); 3550 break; 3551 case 0xc6: /* fdtos */ 3552 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos); 3553 break; 3554 case 0xc7: /* fqtos */ 3555 CHECK_FPU_FEATURE(dc, FLOAT128); 3556 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos); 3557 break; 3558 case 0xc8: /* fitod */ 3559 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod); 3560 break; 3561 case 0xc9: /* fstod */ 3562 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod); 3563 break; 3564 case 0xcb: /* fqtod */ 3565 CHECK_FPU_FEATURE(dc, FLOAT128); 3566 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod); 3567 break; 3568 case 0xcc: /* fitoq */ 3569 CHECK_FPU_FEATURE(dc, FLOAT128); 3570 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq); 3571 break; 3572 case 0xcd: /* fstoq */ 3573 CHECK_FPU_FEATURE(dc, FLOAT128); 3574 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq); 3575 break; 3576 case 0xce: /* fdtoq */ 3577 CHECK_FPU_FEATURE(dc, FLOAT128); 3578 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq); 3579 break; 3580 case 0xd1: /* fstoi */ 3581 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi); 3582 break; 3583 case 0xd2: /* fdtoi */ 3584 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi); 3585 break; 3586 case 0xd3: /* fqtoi */ 3587 CHECK_FPU_FEATURE(dc, FLOAT128); 3588 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi); 3589 break; 3590 #ifdef TARGET_SPARC64 3591 case 0x2: /* V9 fmovd */ 3592 cpu_src1_64 = gen_load_fpr_D(dc, rs2); 3593 gen_store_fpr_D(dc, rd, cpu_src1_64); 3594 break; 3595 case 0x3: /* V9 fmovq */ 3596 CHECK_FPU_FEATURE(dc, FLOAT128); 3597 gen_move_Q(dc, rd, rs2); 3598 break; 3599 case 0x6: /* V9 fnegd */ 3600 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd); 3601 break; 3602 case 0x7: /* V9 fnegq */ 3603 CHECK_FPU_FEATURE(dc, FLOAT128); 3604 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq); 3605 break; 3606 case 0xa: /* V9 fabsd */ 3607 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd); 3608 break; 3609 case 0xb: /* V9 fabsq */ 3610 CHECK_FPU_FEATURE(dc, FLOAT128); 3611 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq); 3612 break; 3613 case 0x81: /* V9 fstox */ 3614 gen_fop_DF(dc, rd, rs2, gen_helper_fstox); 3615 break; 3616 case 0x82: /* V9 fdtox */ 3617 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox); 3618 break; 3619 case 0x83: /* V9 fqtox */ 3620 CHECK_FPU_FEATURE(dc, FLOAT128); 3621 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox); 3622 break; 3623 case 0x84: /* V9 fxtos */ 3624 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos); 3625 break; 3626 case 0x88: /* V9 fxtod */ 3627 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod); 3628 break; 3629 case 0x8c: /* V9 fxtoq */ 3630 CHECK_FPU_FEATURE(dc, FLOAT128); 3631 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq); 3632 break; 3633 #endif 3634 default: 3635 goto illegal_insn; 3636 } 3637 } else if (xop == 0x35) { /* FPU Operations */ 3638 #ifdef TARGET_SPARC64 3639 int cond; 3640 #endif 3641 if (gen_trap_ifnofpu(dc)) { 3642 goto jmp_insn; 3643 } 3644 gen_op_clear_ieee_excp_and_FTT(); 3645 rs1 = GET_FIELD(insn, 13, 17); 3646 rs2 = GET_FIELD(insn, 27, 31); 3647 xop = GET_FIELD(insn, 18, 26); 3648 3649 #ifdef TARGET_SPARC64 3650 #define FMOVR(sz) \ 3651 do { \ 3652 DisasCompare cmp; \ 3653 cond = GET_FIELD_SP(insn, 10, 12); \ 3654 cpu_src1 = get_src1(dc, insn); \ 3655 gen_compare_reg(&cmp, cond, cpu_src1); \ 3656 
gen_fmov##sz(dc, &cmp, rd, rs2); \ 3657 } while (0) 3658 3659 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */ 3660 FMOVR(s); 3661 break; 3662 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr 3663 FMOVR(d); 3664 break; 3665 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr 3666 CHECK_FPU_FEATURE(dc, FLOAT128); 3667 FMOVR(q); 3668 break; 3669 } 3670 #undef FMOVR 3671 #endif 3672 switch (xop) { 3673 #ifdef TARGET_SPARC64 3674 #define FMOVCC(fcc, sz) \ 3675 do { \ 3676 DisasCompare cmp; \ 3677 cond = GET_FIELD_SP(insn, 14, 17); \ 3678 gen_fcompare(&cmp, fcc, cond); \ 3679 gen_fmov##sz(dc, &cmp, rd, rs2); \ 3680 } while (0) 3681 3682 case 0x001: /* V9 fmovscc %fcc0 */ 3683 FMOVCC(0, s); 3684 break; 3685 case 0x002: /* V9 fmovdcc %fcc0 */ 3686 FMOVCC(0, d); 3687 break; 3688 case 0x003: /* V9 fmovqcc %fcc0 */ 3689 CHECK_FPU_FEATURE(dc, FLOAT128); 3690 FMOVCC(0, q); 3691 break; 3692 case 0x041: /* V9 fmovscc %fcc1 */ 3693 FMOVCC(1, s); 3694 break; 3695 case 0x042: /* V9 fmovdcc %fcc1 */ 3696 FMOVCC(1, d); 3697 break; 3698 case 0x043: /* V9 fmovqcc %fcc1 */ 3699 CHECK_FPU_FEATURE(dc, FLOAT128); 3700 FMOVCC(1, q); 3701 break; 3702 case 0x081: /* V9 fmovscc %fcc2 */ 3703 FMOVCC(2, s); 3704 break; 3705 case 0x082: /* V9 fmovdcc %fcc2 */ 3706 FMOVCC(2, d); 3707 break; 3708 case 0x083: /* V9 fmovqcc %fcc2 */ 3709 CHECK_FPU_FEATURE(dc, FLOAT128); 3710 FMOVCC(2, q); 3711 break; 3712 case 0x0c1: /* V9 fmovscc %fcc3 */ 3713 FMOVCC(3, s); 3714 break; 3715 case 0x0c2: /* V9 fmovdcc %fcc3 */ 3716 FMOVCC(3, d); 3717 break; 3718 case 0x0c3: /* V9 fmovqcc %fcc3 */ 3719 CHECK_FPU_FEATURE(dc, FLOAT128); 3720 FMOVCC(3, q); 3721 break; 3722 #undef FMOVCC 3723 #define FMOVCC(xcc, sz) \ 3724 do { \ 3725 DisasCompare cmp; \ 3726 cond = GET_FIELD_SP(insn, 14, 17); \ 3727 gen_compare(&cmp, xcc, cond, dc); \ 3728 gen_fmov##sz(dc, &cmp, rd, rs2); \ 3729 } while (0) 3730 3731 case 0x101: /* V9 fmovscc %icc */ 3732 FMOVCC(0, s); 3733 break; 3734 case 0x102: /* V9 fmovdcc %icc */ 3735 FMOVCC(0, d); 3736 break; 3737 case 0x103: /* V9 fmovqcc %icc */ 3738 CHECK_FPU_FEATURE(dc, FLOAT128); 3739 FMOVCC(0, q); 3740 break; 3741 case 0x181: /* V9 fmovscc %xcc */ 3742 FMOVCC(1, s); 3743 break; 3744 case 0x182: /* V9 fmovdcc %xcc */ 3745 FMOVCC(1, d); 3746 break; 3747 case 0x183: /* V9 fmovqcc %xcc */ 3748 CHECK_FPU_FEATURE(dc, FLOAT128); 3749 FMOVCC(1, q); 3750 break; 3751 #undef FMOVCC 3752 #endif 3753 case 0x51: /* fcmps, V9 %fcc */ 3754 cpu_src1_32 = gen_load_fpr_F(dc, rs1); 3755 cpu_src2_32 = gen_load_fpr_F(dc, rs2); 3756 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32); 3757 break; 3758 case 0x52: /* fcmpd, V9 %fcc */ 3759 cpu_src1_64 = gen_load_fpr_D(dc, rs1); 3760 cpu_src2_64 = gen_load_fpr_D(dc, rs2); 3761 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64); 3762 break; 3763 case 0x53: /* fcmpq, V9 %fcc */ 3764 CHECK_FPU_FEATURE(dc, FLOAT128); 3765 gen_op_load_fpr_QT0(QFPREG(rs1)); 3766 gen_op_load_fpr_QT1(QFPREG(rs2)); 3767 gen_op_fcmpq(rd & 3); 3768 break; 3769 case 0x55: /* fcmpes, V9 %fcc */ 3770 cpu_src1_32 = gen_load_fpr_F(dc, rs1); 3771 cpu_src2_32 = gen_load_fpr_F(dc, rs2); 3772 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32); 3773 break; 3774 case 0x56: /* fcmped, V9 %fcc */ 3775 cpu_src1_64 = gen_load_fpr_D(dc, rs1); 3776 cpu_src2_64 = gen_load_fpr_D(dc, rs2); 3777 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64); 3778 break; 3779 case 0x57: /* fcmpeq, V9 %fcc */ 3780 CHECK_FPU_FEATURE(dc, FLOAT128); 3781 gen_op_load_fpr_QT0(QFPREG(rs1)); 3782 gen_op_load_fpr_QT1(QFPREG(rs2)); 3783 gen_op_fcmpeq(rd & 3); 3784 break; 3785 
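/* All of the fcmp cases above update the %fcc field selected by the
   low two bits of rd (hence the rd & 3); e.g. fcmpd %fcc2, %f2, %f4
   encodes rd = 2 and touches only fcc2. */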
default: 3786 goto illegal_insn; 3787 } 3788 } else if (xop == 0x2) { 3789 TCGv dst = gen_dest_gpr(dc, rd); 3790 rs1 = GET_FIELD(insn, 13, 17); 3791 if (rs1 == 0) { 3792 /* clr/mov shortcut : or %g0, x, y -> mov x, y */ 3793 if (IS_IMM) { /* immediate */ 3794 simm = GET_FIELDs(insn, 19, 31); 3795 tcg_gen_movi_tl(dst, simm); 3796 gen_store_gpr(dc, rd, dst); 3797 } else { /* register */ 3798 rs2 = GET_FIELD(insn, 27, 31); 3799 if (rs2 == 0) { 3800 tcg_gen_movi_tl(dst, 0); 3801 gen_store_gpr(dc, rd, dst); 3802 } else { 3803 cpu_src2 = gen_load_gpr(dc, rs2); 3804 gen_store_gpr(dc, rd, cpu_src2); 3805 } 3806 } 3807 } else { 3808 cpu_src1 = get_src1(dc, insn); 3809 if (IS_IMM) { /* immediate */ 3810 simm = GET_FIELDs(insn, 19, 31); 3811 tcg_gen_ori_tl(dst, cpu_src1, simm); 3812 gen_store_gpr(dc, rd, dst); 3813 } else { /* register */ 3814 rs2 = GET_FIELD(insn, 27, 31); 3815 if (rs2 == 0) { 3816 /* mov shortcut: or x, %g0, y -> mov x, y */ 3817 gen_store_gpr(dc, rd, cpu_src1); 3818 } else { 3819 cpu_src2 = gen_load_gpr(dc, rs2); 3820 tcg_gen_or_tl(dst, cpu_src1, cpu_src2); 3821 gen_store_gpr(dc, rd, dst); 3822 } 3823 } 3824 } 3825 #ifdef TARGET_SPARC64 3826 } else if (xop == 0x25) { /* sll, V9 sllx */ 3827 cpu_src1 = get_src1(dc, insn); 3828 if (IS_IMM) { /* immediate */ 3829 simm = GET_FIELDs(insn, 20, 31); 3830 if (insn & (1 << 12)) { 3831 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f); 3832 } else { 3833 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f); 3834 } 3835 } else { /* register */ 3836 rs2 = GET_FIELD(insn, 27, 31); 3837 cpu_src2 = gen_load_gpr(dc, rs2); 3838 cpu_tmp0 = tcg_temp_new(); 3839 if (insn & (1 << 12)) { 3840 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); 3841 } else { 3842 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); 3843 } 3844 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0); 3845 } 3846 gen_store_gpr(dc, rd, cpu_dst); 3847 } else if (xop == 0x26) { /* srl, V9 srlx */ 3848 cpu_src1 = get_src1(dc, insn); 3849 if (IS_IMM) { /* immediate */ 3850 simm = GET_FIELDs(insn, 20, 31); 3851 if (insn & (1 << 12)) { 3852 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f); 3853 } else { 3854 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL); 3855 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f); 3856 } 3857 } else { /* register */ 3858 rs2 = GET_FIELD(insn, 27, 31); 3859 cpu_src2 = gen_load_gpr(dc, rs2); 3860 cpu_tmp0 = tcg_temp_new(); 3861 if (insn & (1 << 12)) { 3862 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); 3863 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0); 3864 } else { 3865 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); 3866 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL); 3867 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0); 3868 } 3869 } 3870 gen_store_gpr(dc, rd, cpu_dst); 3871 } else if (xop == 0x27) { /* sra, V9 srax */ 3872 cpu_src1 = get_src1(dc, insn); 3873 if (IS_IMM) { /* immediate */ 3874 simm = GET_FIELDs(insn, 20, 31); 3875 if (insn & (1 << 12)) { 3876 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f); 3877 } else { 3878 tcg_gen_ext32s_i64(cpu_dst, cpu_src1); 3879 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f); 3880 } 3881 } else { /* register */ 3882 rs2 = GET_FIELD(insn, 27, 31); 3883 cpu_src2 = gen_load_gpr(dc, rs2); 3884 cpu_tmp0 = tcg_temp_new(); 3885 if (insn & (1 << 12)) { 3886 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f); 3887 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0); 3888 } else { 3889 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f); 3890 tcg_gen_ext32s_i64(cpu_dst, cpu_src1); 3891 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0); 3892 } 3893 } 3894 gen_store_gpr(dc, rd, 
cpu_dst); 3895 #endif 3896 } else if (xop < 0x36) { 3897 if (xop < 0x20) { 3898 cpu_src1 = get_src1(dc, insn); 3899 cpu_src2 = get_src2(dc, insn); 3900 switch (xop & ~0x10) { 3901 case 0x0: /* add */ 3902 if (xop & 0x10) { 3903 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2); 3904 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD); 3905 dc->cc_op = CC_OP_ADD; 3906 } else { 3907 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2); 3908 } 3909 break; 3910 case 0x1: /* and */ 3911 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2); 3912 if (xop & 0x10) { 3913 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3914 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3915 dc->cc_op = CC_OP_LOGIC; 3916 } 3917 break; 3918 case 0x2: /* or */ 3919 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2); 3920 if (xop & 0x10) { 3921 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3922 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3923 dc->cc_op = CC_OP_LOGIC; 3924 } 3925 break; 3926 case 0x3: /* xor */ 3927 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2); 3928 if (xop & 0x10) { 3929 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3930 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3931 dc->cc_op = CC_OP_LOGIC; 3932 } 3933 break; 3934 case 0x4: /* sub */ 3935 if (xop & 0x10) { 3936 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2); 3937 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB); 3938 dc->cc_op = CC_OP_SUB; 3939 } else { 3940 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2); 3941 } 3942 break; 3943 case 0x5: /* andn */ 3944 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2); 3945 if (xop & 0x10) { 3946 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3947 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3948 dc->cc_op = CC_OP_LOGIC; 3949 } 3950 break; 3951 case 0x6: /* orn */ 3952 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2); 3953 if (xop & 0x10) { 3954 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3955 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3956 dc->cc_op = CC_OP_LOGIC; 3957 } 3958 break; 3959 case 0x7: /* xorn */ 3960 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2); 3961 if (xop & 0x10) { 3962 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3963 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3964 dc->cc_op = CC_OP_LOGIC; 3965 } 3966 break; 3967 case 0x8: /* addx, V9 addc */ 3968 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2, 3969 (xop & 0x10)); 3970 break; 3971 #ifdef TARGET_SPARC64 3972 case 0x9: /* V9 mulx */ 3973 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2); 3974 break; 3975 #endif 3976 case 0xa: /* umul */ 3977 CHECK_IU_FEATURE(dc, MUL); 3978 gen_op_umul(cpu_dst, cpu_src1, cpu_src2); 3979 if (xop & 0x10) { 3980 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3981 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3982 dc->cc_op = CC_OP_LOGIC; 3983 } 3984 break; 3985 case 0xb: /* smul */ 3986 CHECK_IU_FEATURE(dc, MUL); 3987 gen_op_smul(cpu_dst, cpu_src1, cpu_src2); 3988 if (xop & 0x10) { 3989 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst); 3990 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC); 3991 dc->cc_op = CC_OP_LOGIC; 3992 } 3993 break; 3994 case 0xc: /* subx, V9 subc */ 3995 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2, 3996 (xop & 0x10)); 3997 break; 3998 #ifdef TARGET_SPARC64 3999 case 0xd: /* V9 udivx */ 4000 gen_helper_udivx(cpu_dst, tcg_env, cpu_src1, cpu_src2); 4001 break; 4002 #endif 4003 case 0xe: /* udiv */ 4004 CHECK_IU_FEATURE(dc, DIV); 4005 if (xop & 0x10) { 4006 gen_helper_udiv_cc(cpu_dst, tcg_env, cpu_src1, 4007 cpu_src2); 4008 dc->cc_op = CC_OP_DIV; 4009 } else { 4010 gen_helper_udiv(cpu_dst, tcg_env, cpu_src1, 4011 cpu_src2); 4012 } 4013 break; 4014 case 0xf: /* sdiv */ 4015 CHECK_IU_FEATURE(dc, DIV); 4016 if (xop & 0x10) { 4017 gen_helper_sdiv_cc(cpu_dst, tcg_env, cpu_src1, 
4018 cpu_src2); 4019 dc->cc_op = CC_OP_DIV; 4020 } else { 4021 gen_helper_sdiv(cpu_dst, tcg_env, cpu_src1, 4022 cpu_src2); 4023 } 4024 break; 4025 default: 4026 goto illegal_insn; 4027 } 4028 gen_store_gpr(dc, rd, cpu_dst); 4029 } else { 4030 cpu_src1 = get_src1(dc, insn); 4031 cpu_src2 = get_src2(dc, insn); 4032 switch (xop) { 4033 case 0x20: /* taddcc */ 4034 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2); 4035 gen_store_gpr(dc, rd, cpu_dst); 4036 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD); 4037 dc->cc_op = CC_OP_TADD; 4038 break; 4039 case 0x21: /* tsubcc */ 4040 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2); 4041 gen_store_gpr(dc, rd, cpu_dst); 4042 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB); 4043 dc->cc_op = CC_OP_TSUB; 4044 break; 4045 case 0x22: /* taddcctv */ 4046 gen_helper_taddcctv(cpu_dst, tcg_env, 4047 cpu_src1, cpu_src2); 4048 gen_store_gpr(dc, rd, cpu_dst); 4049 dc->cc_op = CC_OP_TADDTV; 4050 break; 4051 case 0x23: /* tsubcctv */ 4052 gen_helper_tsubcctv(cpu_dst, tcg_env, 4053 cpu_src1, cpu_src2); 4054 gen_store_gpr(dc, rd, cpu_dst); 4055 dc->cc_op = CC_OP_TSUBTV; 4056 break; 4057 case 0x24: /* mulscc */ 4058 update_psr(dc); 4059 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2); 4060 gen_store_gpr(dc, rd, cpu_dst); 4061 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD); 4062 dc->cc_op = CC_OP_ADD; 4063 break; 4064 #ifndef TARGET_SPARC64 4065 case 0x25: /* sll */ 4066 if (IS_IMM) { /* immediate */ 4067 simm = GET_FIELDs(insn, 20, 31); 4068 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f); 4069 } else { /* register */ 4070 cpu_tmp0 = tcg_temp_new(); 4071 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); 4072 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0); 4073 } 4074 gen_store_gpr(dc, rd, cpu_dst); 4075 break; 4076 case 0x26: /* srl */ 4077 if (IS_IMM) { /* immediate */ 4078 simm = GET_FIELDs(insn, 20, 31); 4079 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f); 4080 } else { /* register */ 4081 cpu_tmp0 = tcg_temp_new(); 4082 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); 4083 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0); 4084 } 4085 gen_store_gpr(dc, rd, cpu_dst); 4086 break; 4087 case 0x27: /* sra */ 4088 if (IS_IMM) { /* immediate */ 4089 simm = GET_FIELDs(insn, 20, 31); 4090 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f); 4091 } else { /* register */ 4092 cpu_tmp0 = tcg_temp_new(); 4093 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f); 4094 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0); 4095 } 4096 gen_store_gpr(dc, rd, cpu_dst); 4097 break; 4098 #endif 4099 case 0x30: 4100 { 4101 cpu_tmp0 = tcg_temp_new(); 4102 switch(rd) { 4103 case 0: /* wry */ 4104 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4105 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff); 4106 break; 4107 #ifndef TARGET_SPARC64 4108 case 0x01 ... 0x0f: /* undefined in the 4109 SPARCv8 manual, nop 4110 on the microSPARC 4111 II */ 4112 case 0x10 ... 
0x1f: /* implementation-dependent 4113 in the SPARCv8 4114 manual, nop on the 4115 microSPARC II */ 4116 if ((rd == 0x13) && (dc->def->features & 4117 CPU_FEATURE_POWERDOWN)) { 4118 /* LEON3 power-down */ 4119 save_state(dc); 4120 gen_helper_power_down(tcg_env); 4121 } 4122 break; 4123 #else 4124 case 0x2: /* V9 wrccr */ 4125 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4126 gen_helper_wrccr(tcg_env, cpu_tmp0); 4127 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); 4128 dc->cc_op = CC_OP_FLAGS; 4129 break; 4130 case 0x3: /* V9 wrasi */ 4131 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4132 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff); 4133 tcg_gen_st32_tl(cpu_tmp0, tcg_env, 4134 offsetof(CPUSPARCState, asi)); 4135 /* 4136 * End TB to notice changed ASI. 4137 * TODO: Could notice src1 = %g0 and IS_IMM, 4138 * update DisasContext and not exit the TB. 4139 */ 4140 save_state(dc); 4141 gen_op_next_insn(); 4142 tcg_gen_lookup_and_goto_ptr(); 4143 dc->base.is_jmp = DISAS_NORETURN; 4144 break; 4145 case 0x6: /* V9 wrfprs */ 4146 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4147 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0); 4148 dc->fprs_dirty = 0; 4149 save_state(dc); 4150 gen_op_next_insn(); 4151 tcg_gen_exit_tb(NULL, 0); 4152 dc->base.is_jmp = DISAS_NORETURN; 4153 break; 4154 case 0xf: /* V9 sir, nop if user */ 4155 #if !defined(CONFIG_USER_ONLY) 4156 if (supervisor(dc)) { 4157 ; // XXX 4158 } 4159 #endif 4160 break; 4161 case 0x13: /* Graphics Status */ 4162 if (gen_trap_ifnofpu(dc)) { 4163 goto jmp_insn; 4164 } 4165 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2); 4166 break; 4167 case 0x14: /* Softint set */ 4168 if (!supervisor(dc)) 4169 goto illegal_insn; 4170 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4171 gen_helper_set_softint(tcg_env, cpu_tmp0); 4172 break; 4173 case 0x15: /* Softint clear */ 4174 if (!supervisor(dc)) 4175 goto illegal_insn; 4176 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4177 gen_helper_clear_softint(tcg_env, cpu_tmp0); 4178 break; 4179 case 0x16: /* Softint write */ 4180 if (!supervisor(dc)) 4181 goto illegal_insn; 4182 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4183 gen_helper_write_softint(tcg_env, cpu_tmp0); 4184 break; 4185 case 0x17: /* Tick compare */ 4186 #if !defined(CONFIG_USER_ONLY) 4187 if (!supervisor(dc)) 4188 goto illegal_insn; 4189 #endif 4190 { 4191 TCGv_ptr r_tickptr; 4192 4193 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1, 4194 cpu_src2); 4195 r_tickptr = tcg_temp_new_ptr(); 4196 tcg_gen_ld_ptr(r_tickptr, tcg_env, 4197 offsetof(CPUSPARCState, tick)); 4198 translator_io_start(&dc->base); 4199 gen_helper_tick_set_limit(r_tickptr, 4200 cpu_tick_cmpr); 4201 /* End TB to handle timer interrupt */ 4202 dc->base.is_jmp = DISAS_EXIT; 4203 } 4204 break; 4205 case 0x18: /* System tick */ 4206 #if !defined(CONFIG_USER_ONLY) 4207 if (!supervisor(dc)) 4208 goto illegal_insn; 4209 #endif 4210 { 4211 TCGv_ptr r_tickptr; 4212 4213 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, 4214 cpu_src2); 4215 r_tickptr = tcg_temp_new_ptr(); 4216 tcg_gen_ld_ptr(r_tickptr, tcg_env, 4217 offsetof(CPUSPARCState, stick)); 4218 translator_io_start(&dc->base); 4219 gen_helper_tick_set_count(r_tickptr, 4220 cpu_tmp0); 4221 /* End TB to handle timer interrupt */ 4222 dc->base.is_jmp = DISAS_EXIT; 4223 } 4224 break; 4225 case 0x19: /* System tick compare */ 4226 #if !defined(CONFIG_USER_ONLY) 4227 if (!supervisor(dc)) 4228 goto illegal_insn; 4229 #endif 4230 { 4231 TCGv_ptr r_tickptr; 4232 4233 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1, 4234 cpu_src2); 4235 r_tickptr = tcg_temp_new_ptr(); 4236 
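/* wr computes rs1 ^ rs2, already folded into cpu_stick_cmpr above;
   now fetch env->stick so the helper can re-arm the timer. */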
tcg_gen_ld_ptr(r_tickptr, tcg_env, 4237 offsetof(CPUSPARCState, stick)); 4238 translator_io_start(&dc->base); 4239 gen_helper_tick_set_limit(r_tickptr, 4240 cpu_stick_cmpr); 4241 /* End TB to handle timer interrupt */ 4242 dc->base.is_jmp = DISAS_EXIT; 4243 } 4244 break; 4245 4246 case 0x10: /* Performance Control */ 4247 case 0x11: /* Performance Instrumentation 4248 Counter */ 4249 case 0x12: /* Dispatch Control */ 4250 #endif 4251 default: 4252 goto illegal_insn; 4253 } 4254 } 4255 break; 4256 #if !defined(CONFIG_USER_ONLY) 4257 case 0x31: /* wrpsr, V9 saved, restored */ 4258 { 4259 if (!supervisor(dc)) 4260 goto priv_insn; 4261 #ifdef TARGET_SPARC64 4262 switch (rd) { 4263 case 0: 4264 gen_helper_saved(tcg_env); 4265 break; 4266 case 1: 4267 gen_helper_restored(tcg_env); 4268 break; 4269 case 2: /* UA2005 allclean */ 4270 case 3: /* UA2005 otherw */ 4271 case 4: /* UA2005 normalw */ 4272 case 5: /* UA2005 invalw */ 4273 // XXX 4274 default: 4275 goto illegal_insn; 4276 } 4277 #else 4278 cpu_tmp0 = tcg_temp_new(); 4279 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4280 gen_helper_wrpsr(tcg_env, cpu_tmp0); 4281 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS); 4282 dc->cc_op = CC_OP_FLAGS; 4283 save_state(dc); 4284 gen_op_next_insn(); 4285 tcg_gen_exit_tb(NULL, 0); 4286 dc->base.is_jmp = DISAS_NORETURN; 4287 #endif 4288 } 4289 break; 4290 case 0x32: /* wrwim, V9 wrpr */ 4291 { 4292 if (!supervisor(dc)) 4293 goto priv_insn; 4294 cpu_tmp0 = tcg_temp_new(); 4295 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2); 4296 #ifdef TARGET_SPARC64 4297 switch (rd) { 4298 case 0: // tpc 4299 { 4300 TCGv_ptr r_tsptr; 4301 4302 r_tsptr = tcg_temp_new_ptr(); 4303 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 4304 tcg_gen_st_tl(cpu_tmp0, r_tsptr, 4305 offsetof(trap_state, tpc)); 4306 } 4307 break; 4308 case 1: // tnpc 4309 { 4310 TCGv_ptr r_tsptr; 4311 4312 r_tsptr = tcg_temp_new_ptr(); 4313 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 4314 tcg_gen_st_tl(cpu_tmp0, r_tsptr, 4315 offsetof(trap_state, tnpc)); 4316 } 4317 break; 4318 case 2: // tstate 4319 { 4320 TCGv_ptr r_tsptr; 4321 4322 r_tsptr = tcg_temp_new_ptr(); 4323 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 4324 tcg_gen_st_tl(cpu_tmp0, r_tsptr, 4325 offsetof(trap_state, 4326 tstate)); 4327 } 4328 break; 4329 case 3: // tt 4330 { 4331 TCGv_ptr r_tsptr; 4332 4333 r_tsptr = tcg_temp_new_ptr(); 4334 gen_load_trap_state_at_tl(r_tsptr, tcg_env); 4335 tcg_gen_st32_tl(cpu_tmp0, r_tsptr, 4336 offsetof(trap_state, tt)); 4337 } 4338 break; 4339 case 4: // tick 4340 { 4341 TCGv_ptr r_tickptr; 4342 4343 r_tickptr = tcg_temp_new_ptr(); 4344 tcg_gen_ld_ptr(r_tickptr, tcg_env, 4345 offsetof(CPUSPARCState, tick)); 4346 translator_io_start(&dc->base); 4347 gen_helper_tick_set_count(r_tickptr, 4348 cpu_tmp0); 4349 /* End TB to handle timer interrupt */ 4350 dc->base.is_jmp = DISAS_EXIT; 4351 } 4352 break; 4353 case 5: // tba 4354 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0); 4355 break; 4356 case 6: // pstate 4357 save_state(dc); 4358 if (translator_io_start(&dc->base)) { 4359 dc->base.is_jmp = DISAS_EXIT; 4360 } 4361 gen_helper_wrpstate(tcg_env, cpu_tmp0); 4362 dc->npc = DYNAMIC_PC; 4363 break; 4364 case 7: // tl 4365 save_state(dc); 4366 tcg_gen_st32_tl(cpu_tmp0, tcg_env, 4367 offsetof(CPUSPARCState, tl)); 4368 dc->npc = DYNAMIC_PC; 4369 break; 4370 case 8: // pil 4371 if (translator_io_start(&dc->base)) { 4372 dc->base.is_jmp = DISAS_EXIT; 4373 } 4374 gen_helper_wrpil(tcg_env, cpu_tmp0); 4375 break; 4376 case 9: // cwp 4377 gen_helper_wrcwp(tcg_env, cpu_tmp0); 4378 break; 4379 case 10: 
                    case 0x33: /* wrtbr, UA2005 wrhpr */
                        {
#ifndef TARGET_SPARC64
                            if (!supervisor(dc))
                                goto priv_insn;
                            tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
                            CHECK_IU_FEATURE(dc, HYPV);
                            if (!hypervisor(dc))
                                goto priv_insn;
                            cpu_tmp0 = tcg_temp_new();
                            tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
                            switch (rd) {
                            case 0: // hpstate
                                tcg_gen_st_i64(cpu_tmp0, tcg_env,
                                               offsetof(CPUSPARCState,
                                                        hpstate));
                                save_state(dc);
                                gen_op_next_insn();
                                tcg_gen_exit_tb(NULL, 0);
                                dc->base.is_jmp = DISAS_NORETURN;
                                break;
                            case 1: // htstate
                                // XXX gen_op_wrhtstate();
                                break;
                            case 3: // hintp
                                tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
                                break;
                            case 5: // htba
                                tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
                                break;
                            case 31: // hstick_cmpr
                                {
                                    TCGv_ptr r_tickptr;

                                    tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
                                    r_tickptr = tcg_temp_new_ptr();
                                    tcg_gen_ld_ptr(r_tickptr, tcg_env,
                                                   offsetof(CPUSPARCState, hstick));
                                    translator_io_start(&dc->base);
                                    gen_helper_tick_set_limit(r_tickptr,
                                                              cpu_hstick_cmpr);
                                    /* End TB to handle timer interrupt */
                                    dc->base.is_jmp = DISAS_EXIT;
                                }
                                break;
                            case 6: // hver readonly
                            default:
                                goto illegal_insn;
                            }
#endif
                        }
                        break;
#endif
#ifdef TARGET_SPARC64
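                    /*
                     * MOVcc writes rs2/simm11 into rd only when the
                     * condition holds, leaving rd untouched otherwise.
                     * Bit 18 selects the integer condition codes (with cc
                     * picking icc or xcc) versus one of the fcc sets, and
                     * the whole operation maps onto a single
                     * tcg_gen_movcond_tl below.
                     */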
                    case 0x2c: /* V9 movcc */
                        {
                            int cc = GET_FIELD_SP(insn, 11, 12);
                            int cond = GET_FIELD_SP(insn, 14, 17);
                            DisasCompare cmp;
                            TCGv dst;

                            if (insn & (1 << 18)) {
                                if (cc == 0) {
                                    gen_compare(&cmp, 0, cond, dc);
                                } else if (cc == 2) {
                                    gen_compare(&cmp, 1, cond, dc);
                                } else {
                                    goto illegal_insn;
                                }
                            } else {
                                gen_fcompare(&cmp, cc, cond);
                            }

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 11-bit field we have
                               in movcc.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 10);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
                    case 0x2d: /* V9 sdivx */
                        gen_helper_sdivx(cpu_dst, tcg_env, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2e: /* V9 popc */
                        tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
                        gen_store_gpr(dc, rd, cpu_dst);
                        break;
                    case 0x2f: /* V9 movr */
                        {
                            int cond = GET_FIELD_SP(insn, 10, 12);
                            DisasCompare cmp;
                            TCGv dst;

                            gen_compare_reg(&cmp, cond, cpu_src1);

                            /* The get_src2 above loaded the normal 13-bit
                               immediate field, not the 10-bit field we have
                               in movr.  But it did handle the reg case.  */
                            if (IS_IMM) {
                                simm = GET_FIELD_SPs(insn, 0, 9);
                                tcg_gen_movi_tl(cpu_src2, simm);
                            }

                            dst = gen_load_gpr(dc, rd);
                            tcg_gen_movcond_tl(cmp.cond, dst,
                                               cmp.c1, cmp.c2,
                                               cpu_src2, dst);
                            gen_store_gpr(dc, rd, dst);
                            break;
                        }
#endif
                    default:
                        goto illegal_insn;
                    }
                }
            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }

                switch (opf) {
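                /*
                 * The edge8/16/32 group computes partial-store masks for
                 * the ends of a misaligned block; judging by the call
                 * sites, gen_edge's trailing arguments select the element
                 * width in bits, the cc-setting forms, and the
                 * little-endian variants.
                 */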
                case 0x000: /* VIS I edge8cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x001: /* VIS II edge8n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x002: /* VIS I edge8lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x003: /* VIS II edge8ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x004: /* VIS I edge16cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x005: /* VIS II edge16n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x006: /* VIS I edge16lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x007: /* VIS II edge16ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x008: /* VIS I edge32cc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x009: /* VIS II edge32n */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00a: /* VIS I edge32lcc */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x00b: /* VIS II edge32ln */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x010: /* VIS I array8 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x012: /* VIS I array16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x014: /* VIS I array32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x018: /* VIS I alignaddr */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x01a: /* VIS I alignaddrl */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x019: /* VIS II bmask */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    cpu_src1 = gen_load_gpr(dc, rs1);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
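                /*
                 * Unlike the scalar FP compares, the VIS fcmp* ops below
                 * compare 16- or 32-bit lanes of a 64-bit FP register and
                 * deliver a bit-per-lane mask in an integer register,
                 * which is why they finish with gen_store_gpr rather than
                 * by updating an fcc field.
                 */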
                case 0x020: /* VIS I fcmple16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                    gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                    break;
                case 0x03a: /* VIS I fpack32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                    break;
                case 0x03b: /* VIS I fpack16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03d: /* VIS I fpackfix */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x03e: /* VIS I pdist */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                    break;
                case 0x048: /* VIS I faligndata */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    CHECK_FPU_FEATURE(dc, VIS2);
                    gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                    break;
                case 0x04d: /* VIS I fexpand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                    break;
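                /*
                 * The VIS logical ops need no helpers: they are plain
                 * bitwise operations on FP registers, so the 64-bit forms
                 * and their single-precision "s" variants map 1:1 onto
                 * TCG ops such as tcg_gen_nor_i64 and tcg_gen_andc_i32.
                 */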
                case 0x060: /* VIS I fzero */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, 0);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x061: /* VIS I fzeros */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, 0);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x062: /* VIS I fnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                    break;
                case 0x063: /* VIS I fnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                    break;
                case 0x066: /* VIS I fnot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                    break;
                case 0x067: /* VIS I fnot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                    break;
                case 0x06c: /* VIS I fxor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                    break;
                case 0x06d: /* VIS I fxors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                    break;
                case 0x06e: /* VIS I fnand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                    break;
                case 0x06f: /* VIS I fnands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                    break;
                case 0x070: /* VIS I fand */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                    break;
                case 0x071: /* VIS I fands */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                    break;
                case 0x072: /* VIS I fxnor */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                    break;
                case 0x073: /* VIS I fxnors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x076: /* VIS I fornot2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                    break;
                case 0x077: /* VIS I fornot2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                    gen_store_fpr_D(dc, rd, cpu_src1_64);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                    break;
                case 0x07c: /* VIS I for */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                    break;
                case 0x07d: /* VIS I fors */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                    break;
                case 0x07e: /* VIS I fone */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_movi_i64(cpu_dst_64, -1);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                case 0x07f: /* VIS I fones */
                    CHECK_FPU_FEATURE(dc, VIS1);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_movi_i32(cpu_dst_32, -1);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
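            /*
             * V9 "return" behaves like restore plus an rd-less jmpl: it
             * pops the register window and jumps through the computed
             * target, which must be 4-byte aligned.  npc becomes dynamic,
             * so the TB finishes via the lookup path (DYNAMIC_PC_LOOKUP).
             */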
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                save_state(dc);
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = tcg_temp_new();
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                gen_check_align(dc, cpu_tmp0, 3);
                gen_helper_restore(tcg_env);
                gen_mov_pc_npc(dc);
                tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                dc->npc = DYNAMIC_PC_LOOKUP;
                goto jmp_insn;
#endif
            } else {
                cpu_src1 = get_src1(dc, insn);
                cpu_tmp0 = tcg_temp_new();
                if (IS_IMM) {   /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
                } else {        /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2) {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
                    } else {
                        tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
                    }
                }
                switch (xop) {
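                /*
                 * jmpl: rd receives the address of the jmpl itself
                 * (dc->pc), the delay-slot insn at npc runs next, and npc
                 * is replaced by the computed target, resolved dynamically
                 * when the TB ends.
                 */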
                case 0x38: /* jmpl */
                    {
                        gen_check_align(dc, cpu_tmp0, 3);
                        gen_store_gpr(dc, rd, tcg_constant_tl(dc->pc));
                        gen_mov_pc_npc(dc);
                        gen_address_mask(dc, cpu_tmp0);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC_LOOKUP;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39: /* rett, V9 return */
                    {
                        if (!supervisor(dc))
                            goto priv_insn;
                        gen_check_align(dc, cpu_tmp0, 3);
                        gen_mov_pc_npc(dc);
                        tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
                        dc->npc = DYNAMIC_PC;
                        gen_helper_rett(tcg_env);
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    /* nop */
                    break;
                case 0x3c: /* save */
                    gen_helper_save(tcg_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
                case 0x3d: /* restore */
                    gen_helper_restore(tcg_env);
                    gen_store_gpr(dc, rd, cpu_tmp0);
                    break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e: /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            translator_io_start(&dc->base);
                            gen_helper_done(tcg_env);
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            translator_io_start(&dc->base);
                            gen_helper_retry(tcg_env);
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
        break;
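    /*
     * Format 3 memory instructions: the op3 field (native bits 24..19,
     * extracted below with the reversed-order GET_FIELD) selects the
     * access, and the groups that follow share one address computation:
     * integer loads, FP loads, integer stores, FP stores, and the
     * alternate-space/cas group.
     */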
    case 3: /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary.  */
            TCGv cpu_addr = tcg_temp_new();

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) { /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else { /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0: /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x1: /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_UB);
                    break;
                case 0x2: /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUW | MO_ALIGN);
                    break;
                case 0x3: /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;

                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9: /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
                    break;
                case 0xa: /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TESW | MO_ALIGN);
                    break;
                case 0xd: /* ldstub */
                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0f:
                    /* swap, swap register with memory; also atomic */
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
                             dc->mem_idx, MO_TEUL);
                    break;
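                /*
                 * The alternate-space loads take their ASI from the
                 * instruction's asi field when i=0, or from the %asi
                 * register for the i=1 immediate form on V9; gen_ld_asi
                 * digs the right one out of insn.
                 */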
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10: /* lda, V9 lduwa, load word alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x11: /* lduba, load unsigned byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x12: /* lduha, load unsigned halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x13: /* ldda, load double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_ldda_asi(dc, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19: /* ldsba, load signed byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
                    break;
                case 0x1a: /* ldsha, load signed halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
                    break;
                case 0x1d: /* ldstuba -- XXX: should be atomic */
                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
                    break;
                case 0x1f: /* swapa, swap reg with alt. memory; also atomic */
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
                    break;

#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TESL | MO_ALIGN);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(dc, rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(dc, DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(dc, QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
        skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
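                /*
                 * FP loads: gen_trap_ifnofpu raises fp_disabled first if
                 * the FPU is off.  Note ldfsr: on SPARC64, rd == 1 selects
                 * the 64-bit ldxfsr form, while rd == 0 keeps the 32-bit
                 * V8 behaviour.
                 */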
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20: /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
                        break;
                    }
#endif
                    cpu_dst_32 = tcg_temp_new_i32();
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
                    break;
                case 0x22: /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x23: /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUW | MO_ALIGN);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;
                        TCGv lo;

                        gen_address_mask(dc, cpu_addr);
                        lo = gen_load_gpr(dc, rd + 1);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x15: /* stba, store byte alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x16: /* stha, store halfword alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x17: /* stda, store double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
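                /*
                 * FP stores.  The odd one out is opcode 0x26: stqf on V9,
                 * but stdfq (store FP queue) pre-V9, which this model
                 * treats as unimplemented and routes to nfq_insn to raise
                 * an FSR sequence error.
                 */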
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                               dc->mem_idx, MO_TEUQ | MO_ALIGN);
                            break;
                        }
#endif
                        tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                           dc->mem_idx, MO_TEUL | MO_ALIGN);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    /* ??? While stqf only requires 4-byte alignment, it is
                       legal for the cpu to signal the unaligned exception.
                       The OS trap handler is then required to fix it up.
                       For qemu, this avoids having to probe the second page
                       before performing the first write.  */
                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ);
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    {
                        CHECK_FPU_FEATURE(dc, FLOAT128);
                        if (gen_trap_ifnofpu(dc)) {
                            goto jmp_insn;
                        }
                        gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    }
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    advance_pc(dc);
 jmp_insn:
    return;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    return;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    return;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    gen_exception(dc, TT_NCP_INSN);
    return;
#endif
}
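
/*
 * The TranslatorOps hooks below are driven by translator_loop():
 * init_disas_context once per TB, then insn_start and translate_insn
 * per instruction, and tb_stop to emit the TB epilogue.  The
 * SPARC-specific twist is the (pc, npc) pair, which must be tracked
 * through delay slots and conditional branches.
 */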
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * If we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page.
     * -(pc_first | TARGET_PAGE_MASK) is the number of bytes left on
     * the page, so e.g. a pc_first 8 bytes short of the boundary
     * yields a bound of 2 instructions.
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        disas_sparc_legacy(dc, insn);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
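
/*
 * Register the TCG globals.  %g0 stays NULL because it reads as zero
 * and writes to it are discarded, and the windowed %o/%l/%i registers
 * are addressed through cpu_regwptr so that a window switch only has
 * to retarget one pointer instead of 24 globals.
 */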
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }
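
/*
 * restore_state_to_opc consumes the (pc, npc) pair recorded by
 * insn_start.  npc uses its low bits as a tag: DYNAMIC_PC means
 * env->npc was already written by the TB, and a JUMP_PC-tagged value
 * carries the taken branch target in its upper bits, with env->cond
 * picking between that and the fall-through pc + 4.
 */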
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}

void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}