translate.c: diff of 2da789ded5a89dff4a3485bc486b21278f8b61d3 vs. 428881deba62aa8fd5ef9248deba79594f70615a
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public

--- 3999 unchanged lines hidden ---

     * In the meantime, treat all 32-bit cpus like sparcv7.
     */
    if (avail_32(dc)) {
        return advance_pc(dc);
    }
    return false;
}

--- block present only in 428881deba62aa8fd5ef9248deba79594f70615a ---

static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long))
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (a->cc) {
        dst = cpu_cc_dst;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }
    gen_store_gpr(dc, a->rd, dst);

    if (a->cc) {
        tcg_gen_movi_i32(cpu_cc_op, cc_op);
        dc->cc_op = cc_op;
    }
    return advance_pc(dc);
}

static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, cc_op, func_cc, NULL);
    }
    return do_arith_int(dc, a, cc_op, func, funci);
}

static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
}

TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
      tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
      tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}

--- end of 428881deba-only block ---
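
/*
 * Editorial sketch, not part of either revision: the TRANS() lines above
 * rely on the file's TRANS() macro and on the arg_* structures presumably
 * generated by decodetree (arg_r_r_ri_cc carries the rd, rs1, rs2_or_imm,
 * imm and cc fields used by do_arith_int).  The "under-decoded" checks
 * reject register forms whose 13-bit field has bits set outside rs2's
 * five.  Assuming the usual QEMU TRANS() pattern, a line such as
 *
 *     TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
 *           tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
 *
 * expands to roughly
 *
 *     static bool trans_ADD(DisasContext *dc, arg_ADD *a)
 *     {
 *         return avail_ALL(dc) &&
 *                do_arith(dc, a, CC_OP_ADD,
 *                         tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
 *     }
 *
 * so every decoded ADD/ADDcc funnels through do_arith()/do_arith_int(),
 * which selects the register or immediate operand form and, when a->cc is
 * set, writes the result to cpu_cc_dst and updates cpu_cc_op.
 */
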
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;

/* before an instruction, dc->pc must be static */

--- 332 unchanged lines hidden ---

                CHECK_FPU_FEATURE(dc, FLOAT128);
                gen_op_load_fpr_QT0(QFPREG(rs1));
                gen_op_load_fpr_QT1(QFPREG(rs2));
                gen_op_fcmpeq(rd & 3);
                break;
            default:
                goto illegal_insn;
            }
--- block present only in 2da789ded5a89dff4a3485bc486b21278f8b61d3 ---

        } else if (xop == 0x2) {
            TCGv dst = gen_dest_gpr(dc, rd);
            rs1 = GET_FIELD(insn, 13, 17);
            if (rs1 == 0) {
                /* clr/mov shortcut : or %g0, x, y -> mov x, y */
                if (IS_IMM) {       /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_movi_tl(dst, simm);
                    gen_store_gpr(dc, rd, dst);
                } else {            /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2 == 0) {
                        tcg_gen_movi_tl(dst, 0);
                        gen_store_gpr(dc, rd, dst);
                    } else {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        gen_store_gpr(dc, rd, cpu_src2);
                    }
                }
            } else {
                cpu_src1 = get_src1(dc, insn);
                if (IS_IMM) {       /* immediate */
                    simm = GET_FIELDs(insn, 19, 31);
                    tcg_gen_ori_tl(dst, cpu_src1, simm);
                    gen_store_gpr(dc, rd, dst);
                } else {            /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    if (rs2 == 0) {
                        /* mov shortcut: or x, %g0, y -> mov x, y */
                        gen_store_gpr(dc, rd, cpu_src1);
                    } else {
                        cpu_src2 = gen_load_gpr(dc, rs2);
                        tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
                        gen_store_gpr(dc, rd, dst);
                    }
                }
            }

--- end of 2da789ded-only block ---
#ifdef TARGET_SPARC64
        } else if (xop == 0x25) {  /* sll, V9 sllx */
            cpu_src1 = get_src1(dc, insn);
            if (IS_IMM) { /* immediate */
                simm = GET_FIELDs(insn, 20, 31);
                if (insn & (1 << 12)) {
                    tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
                } else {

--- 60 unchanged lines hidden ---

            }
            gen_store_gpr(dc, rd, cpu_dst);
#endif
        } else if (xop < 0x36) {
            if (xop < 0x20) {
                cpu_src1 = get_src1(dc, insn);
                cpu_src2 = get_src2(dc, insn);
                switch (xop & ~0x10) {
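
/*
 * Editorial note, not part of either revision: in this op3 encoding the
 * condition-code variants (ADDcc, ANDcc, ...) differ from the plain forms
 * only in bit 4 of xop, so the switch masks it off with ~0x10 and each
 * case re-tests (xop & 0x10) to decide whether the flags are written.
 */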
--- block present only in 2da789ded5a89dff4a3485bc486b21278f8b61d3 ---

                case 0x0: /* add */
                    if (xop & 0x10) {
                        gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
                        dc->cc_op = CC_OP_ADD;
                    } else {
                        tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                    }
                    break;
                case 0x1: /* and */
                    tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x2: /* or */
                    tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x3: /* xor */
                    tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x4: /* sub */
                    if (xop & 0x10) {
                        gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
                        dc->cc_op = CC_OP_SUB;
                    } else {
                        tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
                    }
                    break;
                case 0x5: /* andn */
                    tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x6: /* orn */
                    tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;
                case 0x7: /* xorn */
                    tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
                    if (xop & 0x10) {
                        tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
                        tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
                        dc->cc_op = CC_OP_LOGIC;
                    }
                    break;

--- end of 2da789ded-only block ---
                case 0x8: /* addx, V9 addc */
                    gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
                                    (xop & 0x10));
                    break;
#ifdef TARGET_SPARC64
                case 0x9: /* V9 mulx */
                    tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
                    break;

--- 1462 unchanged lines hidden ---
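
/*
 * Editorial sketch, not part of either revision: the removed cases above
 * map one-to-one onto the TRANS() entries in the 428881deba-only block.
 * For example, "addcc %i0, %i1, %o0" (op3 = 0x10, i.e. 0x0 | 0x10):
 *
 *   2da789ded:  case 0x0 sees (xop & 0x10) set, so it calls
 *               gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2) and then
 *               records CC_OP_ADD in cpu_cc_op and dc->cc_op.
 *
 *   428881deba: the decoder selects trans_ADD with a->cc set, so do_arith
 *               routes to do_arith_int with gen_op_add_cc, which computes
 *               into cpu_cc_dst, stores the result to %o0 via
 *               gen_store_gpr(), and records CC_OP_ADD in cpu_cc_op and
 *               dc->cc_op.
 *
 * The plain (non-cc) forms take the tcg_gen_*_tl / tcg_gen_*i_tl path
 * instead, with the immediate variant used when the i bit is set.
 */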