/*
 * Power ISA decode for Fixed-Point Facility instructions
 *
 * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Fixed-Point Load/Store Instructions
 */

static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
                    bool store, MemOp mop)
{
    TCGv ea;

    if (update && (ra == 0 || (!store && ra == rt))) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_INT);

    ea = do_ea_calc(ctx, ra, displ);
    mop ^= ctx->default_tcg_memop_mask;
    if (store) {
        tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    return true;
}

static bool do_ldst_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store, mop);
}

static bool do_ldst_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, MemOp mop)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_ldst_D(ctx, &d, update, store, mop);
}

static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop);
}
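
/*
 * Illustrative sketch only (not generated code): the update forms simply
 * write the computed effective address back into RA, e.g.
 *
 *     lwzu r5, 8(r4)   =>   EA = GPR[r4] + 8;
 *                           GPR[r5] = MEM(EA, 4);
 *                           GPR[r4] = EA;
 *
 * do_ldst() rejects the invalid update forms (RA == 0, or RA == RT for
 * loads) with gen_invalid() before any memory access is emitted.
 */
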
static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
{
#if defined(TARGET_PPC64)
    TCGv ea;
    TCGv_i64 low_addr_gpr, high_addr_gpr;
    MemOp mop;

    REQUIRE_INSNS_FLAGS(ctx, 64BX);

    if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
        /* lq and stq were privileged prior to V. 2.07 */
        REQUIRE_SV(ctx);

        if (ctx->le_mode) {
            gen_align_no_le(ctx);
            return true;
        }
    }

    if (!store && unlikely(a->ra == a->rt)) {
        gen_invalid(ctx);
        return true;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));

    if (prefixed || !ctx->le_mode) {
        low_addr_gpr = cpu_gpr[a->rt];
        high_addr_gpr = cpu_gpr[a->rt + 1];
    } else {
        low_addr_gpr = cpu_gpr[a->rt + 1];
        high_addr_gpr = cpu_gpr[a->rt];
    }

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            mop = DEF_MEMOP(MO_128);
            TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx));
            if (store) {
                if (ctx->le_mode) {
                    gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr,
                                               high_addr_gpr, oi);
                } else {
                    gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr,
                                               low_addr_gpr, oi);
                }
            } else {
                if (ctx->le_mode) {
                    gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi);
                    tcg_gen_ld_i64(high_addr_gpr, cpu_env,
                                   offsetof(CPUPPCState, retxh));
                } else {
                    gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi);
                    tcg_gen_ld_i64(low_addr_gpr, cpu_env,
                                   offsetof(CPUPPCState, retxh));
                }
            }
        } else {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        mop = DEF_MEMOP(MO_UQ);
        if (store) {
            tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
        } else {
            tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
        }

        gen_addr_add(ctx, ea, ea, 8);

        if (store) {
            tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
        } else {
            tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
        }
    }
#else
    qemu_build_not_reached();
#endif

    return true;
}

static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_ldst_quad(ctx, &d, store, true);
}
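
/*
 * The TRANS/TRANS64 listings below bind the decodetree-generated trans_*
 * entry points to the helpers above. The macros are defined elsewhere (in
 * translate.c) and expand roughly to
 *
 *     static bool trans_LBZ(DisasContext *ctx, arg_LBZ *a)
 *     {
 *         return do_ldst_D(ctx, a, false, false, MO_UB);
 *     }
 *
 * with TRANS64 additionally requiring a 64-bit implementation
 * (REQUIRE_64BIT) before calling the helper.
 */
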
/* Load Byte and Zero */
TRANS(LBZ, do_ldst_D, false, false, MO_UB)
TRANS(LBZX, do_ldst_X, false, false, MO_UB)
TRANS(LBZU, do_ldst_D, true, false, MO_UB)
TRANS(LBZUX, do_ldst_X, true, false, MO_UB)
TRANS(PLBZ, do_ldst_PLS_D, false, false, MO_UB)

/* Load Halfword and Zero */
TRANS(LHZ, do_ldst_D, false, false, MO_UW)
TRANS(LHZX, do_ldst_X, false, false, MO_UW)
TRANS(LHZU, do_ldst_D, true, false, MO_UW)
TRANS(LHZUX, do_ldst_X, true, false, MO_UW)
TRANS(PLHZ, do_ldst_PLS_D, false, false, MO_UW)

/* Load Halfword Algebraic */
TRANS(LHA, do_ldst_D, false, false, MO_SW)
TRANS(LHAX, do_ldst_X, false, false, MO_SW)
TRANS(LHAU, do_ldst_D, true, false, MO_SW)
TRANS(LHAXU, do_ldst_X, true, false, MO_SW)
TRANS(PLHA, do_ldst_PLS_D, false, false, MO_SW)

/* Load Word and Zero */
TRANS(LWZ, do_ldst_D, false, false, MO_UL)
TRANS(LWZX, do_ldst_X, false, false, MO_UL)
TRANS(LWZU, do_ldst_D, true, false, MO_UL)
TRANS(LWZUX, do_ldst_X, true, false, MO_UL)
TRANS(PLWZ, do_ldst_PLS_D, false, false, MO_UL)

/* Load Word Algebraic */
TRANS64(LWA, do_ldst_D, false, false, MO_SL)
TRANS64(LWAX, do_ldst_X, false, false, MO_SL)
TRANS64(LWAUX, do_ldst_X, true, false, MO_SL)
TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)

/* Load Doubleword */
TRANS64(LD, do_ldst_D, false, false, MO_UQ)
TRANS64(LDX, do_ldst_X, false, false, MO_UQ)
TRANS64(LDU, do_ldst_D, true, false, MO_UQ)
TRANS64(LDUX, do_ldst_X, true, false, MO_UQ)
TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ)

/* Load Quadword */
TRANS64(LQ, do_ldst_quad, false, false);
TRANS64(PLQ, do_ldst_quad_PLS_D, false);

/* Store Byte */
TRANS(STB, do_ldst_D, false, true, MO_UB)
TRANS(STBX, do_ldst_X, false, true, MO_UB)
TRANS(STBU, do_ldst_D, true, true, MO_UB)
TRANS(STBUX, do_ldst_X, true, true, MO_UB)
TRANS(PSTB, do_ldst_PLS_D, false, true, MO_UB)

/* Store Halfword */
TRANS(STH, do_ldst_D, false, true, MO_UW)
TRANS(STHX, do_ldst_X, false, true, MO_UW)
TRANS(STHU, do_ldst_D, true, true, MO_UW)
TRANS(STHUX, do_ldst_X, true, true, MO_UW)
TRANS(PSTH, do_ldst_PLS_D, false, true, MO_UW)

/* Store Word */
TRANS(STW, do_ldst_D, false, true, MO_UL)
TRANS(STWX, do_ldst_X, false, true, MO_UL)
TRANS(STWU, do_ldst_D, true, true, MO_UL)
TRANS(STWUX, do_ldst_X, true, true, MO_UL)
TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)

/* Store Doubleword */
TRANS64(STD, do_ldst_D, false, true, MO_UQ)
TRANS64(STDX, do_ldst_X, false, true, MO_UQ)
TRANS64(STDU, do_ldst_D, true, true, MO_UQ)
TRANS64(STDUX, do_ldst_X, true, true, MO_UQ)
TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ)

/* Store Quadword */
TRANS64(STQ, do_ldst_quad, true, false);
TRANS64(PSTQ, do_ldst_quad_PLS_D, true);

/*
 * Fixed-Point Compare Instructions
 */

static bool do_cmp_X(DisasContext *ctx, arg_X_bfl *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "" : "L", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    }
    return true;
}
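
/*
 * Illustrative sketch only: on a 64-bit implementation the L field selects
 * the comparison width, e.g.
 *
 *     cmpw cr3, r4, r5    (L=0)  compares the sign-extended low 32 bits
 *     cmpd cr3, r4, r5    (L=1)  compares the full 64-bit registers
 *
 * and gen_op_cmp32()/gen_op_cmp() write the LT/GT/EQ result (plus a copy of
 * XER[SO]) into the CR field selected by BF. do_cmp_D() below applies the
 * same logic to the immediate forms.
 */
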
static bool do_cmp_D(DisasContext *ctx, arg_D_bf *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "I" : "LI", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    }
    return true;
}

TRANS(CMP, do_cmp_X, true);
TRANS(CMPL, do_cmp_X, false);
TRANS(CMPI, do_cmp_D, true);
TRANS(CMPLI, do_cmp_D, false);

/*
 * Fixed-Point Arithmetic Instructions
 */

static bool trans_ADDI(DisasContext *ctx, arg_D *a)
{
    if (a->ra) {
        tcg_gen_addi_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], a->si);
    } else {
        tcg_gen_movi_tl(cpu_gpr[a->rt], a->si);
    }
    return true;
}

static bool trans_PADDI(DisasContext *ctx, arg_PLS_D *a)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return trans_ADDI(ctx, &d);
}

static bool trans_ADDIS(DisasContext *ctx, arg_D *a)
{
    a->si <<= 16;
    return trans_ADDI(ctx, a);
}
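
/*
 * Illustrative sketch only: the usual assembler idioms map onto the helpers
 * above as
 *
 *     li  rt, si   ==  addi  rt, 0, si   (RA=0 reads as literal 0, not GPR0)
 *     lis rt, si   ==  addis rt, 0, si   (si placed in the upper 16 bits)
 *
 * while PADDI takes a wider immediate (34 bits in ISA v3.1) from the prefix
 * word, which resolve_PLS_D() folds into the arg_D passed to trans_ADDI().
 */
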
static bool trans_ADDPCIS(DisasContext *ctx, arg_DX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    tcg_gen_movi_tl(cpu_gpr[a->rt], ctx->base.pc_next + (a->d << 16));
    return true;
}

static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a)
{
    gen_invalid(ctx);
    return true;
}

static bool trans_PNOP(DisasContext *ctx, arg_PNOP *a)
{
    return true;
}

static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    uint32_t mask = 0x08 >> (a->bi & 0x03);
    TCGCond cond = rev ? TCG_COND_EQ : TCG_COND_NE;
    TCGv temp = tcg_temp_new();

    tcg_gen_extu_i32_tl(temp, cpu_crf[a->bi >> 2]);
    tcg_gen_andi_tl(temp, temp, mask);
    tcg_gen_setcondi_tl(cond, cpu_gpr[a->rt], temp, 0);
    if (neg) {
        tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]);
    }
    return true;
}

TRANS(SETBC, do_set_bool_cond, false, false)
TRANS(SETBCR, do_set_bool_cond, false, true)
TRANS(SETNBC, do_set_bool_cond, true, false)
TRANS(SETNBCR, do_set_bool_cond, true, true)

static bool trans_CFUGED(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_and_i64(t0, src, mask);
    if (trail) {
        tcg_gen_ctzi_i64(t0, t0, -1);
    } else {
        tcg_gen_clzi_i64(t0, t0, -1);
    }

    tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1);
    tcg_gen_andi_i64(t0, t0, 63);
    tcg_gen_xori_i64(t0, t0, 63);
    if (trail) {
        tcg_gen_shl_i64(t0, mask, t0);
        tcg_gen_shl_i64(t0, t0, t1);
    } else {
        tcg_gen_shr_i64(t0, mask, t0);
        tcg_gen_shr_i64(t0, t0, t1);
    }

    tcg_gen_ctpop_i64(dst, t0);
}

static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_PDEPD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_PEXTD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_ADDG6S(DisasContext *ctx, arg_X *a)
{
    const target_ulong carry_bits = (target_ulong)-1 / 0xf;
    TCGv in1, in2, carryl, carryh, tmp;
    TCGv zero = tcg_constant_tl(0);

    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);

    in1 = cpu_gpr[a->ra];
    in2 = cpu_gpr[a->rb];
    tmp = tcg_temp_new();
    carryl = tcg_temp_new();
    carryh = tcg_temp_new();

    /* Addition with carry. */
    tcg_gen_add2_tl(carryl, carryh, in1, zero, in2, zero);
    /* Addition without carry. */
    tcg_gen_xor_tl(tmp, in1, in2);
    /* Difference between the two is carry in to each bit. */
    tcg_gen_xor_tl(carryl, carryl, tmp);

    /*
     * The carry-out that we're looking for is the carry-in to
     * the next nibble. Shift the double-word down one nibble,
     * which puts all of the bits back into one word.
     */
    tcg_gen_extract2_tl(carryl, carryl, carryh, 4);

    /* Invert, isolate the carry bits, and produce 6's. */
    tcg_gen_andc_tl(carryl, tcg_constant_tl(carry_bits), carryl);
    tcg_gen_muli_tl(cpu_gpr[a->rt], carryl, 6);
    return true;
}
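
/*
 * Illustrative note: the net effect of ADDG6S is that each 4-bit nibble of
 * RT becomes 0x6 where the nibble-wise sum RA + RB produced no carry out,
 * and 0x0 where it did. That mask of sixes is the usual digit-wise
 * correction applied when packed-BCD arithmetic is done with a plain
 * binary adder.
 */
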
static bool trans_CDTBCD(DisasContext *ctx, arg_X_sa *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
    gen_helper_CDTBCD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    return true;
}

static bool trans_CBCDTD(DisasContext *ctx, arg_X_sa *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
    gen_helper_CBCDTD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
    return true;
}

static bool do_hash(DisasContext *ctx, arg_X *a, bool priv,
                    void (*helper)(TCGv_ptr, TCGv, TCGv, TCGv))
{
    TCGv ea;

    if (!(ctx->insns_flags2 & PPC2_ISA310)) {
        /* if version is before v3.1, this operation is a nop */
        return true;
    }

    if (priv) {
        /* if instruction is privileged but the context is in user space */
        REQUIRE_SV(ctx);
    }

    if (unlikely(a->ra == 0)) {
        /* if RA=0, the instruction form is invalid */
        gen_invalid(ctx);
        return true;
    }

    ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt));
    helper(cpu_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]);
    return true;
}

TRANS(HASHST, do_hash, false, gen_helper_HASHST)
TRANS(HASHCHK, do_hash, false, gen_helper_HASHCHK)
TRANS(HASHSTP, do_hash, true, gen_helper_HASHSTP)
TRANS(HASHCHKP, do_hash, true, gen_helper_HASHCHKP)
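
/*
 * Illustrative usage sketch only: with return-oriented-programming (ROP)
 * protection enabled, a function prologue typically executes something like
 *
 *     hashst  r0, -8(r1)     # store a hash derived from LR (in r0) and SP
 *
 * and the matching epilogue
 *
 *     hashchk r0, -8(r1)     # trap if the stored hash no longer matches
 *
 * HASHSTP/HASHCHKP are the privileged variants, gated above by REQUIRE_SV().
 */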