/*
 * Power ISA decode for Fixed-Point Facility instructions
 *
 * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Fixed-Point Load/Store Instructions
 */

static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
                    bool store, MemOp mop)
{
    TCGv ea;

    if (update && (ra == 0 || (!store && ra == rt))) {
        gen_invalid(ctx);
        return true;
    }
    gen_set_access_type(ctx, ACCESS_INT);

    ea = do_ea_calc(ctx, ra, displ);
    mop ^= ctx->default_tcg_memop_mask;
    if (store) {
        tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    } else {
        tcg_gen_qemu_ld_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
    }
    if (update) {
        tcg_gen_mov_tl(cpu_gpr[ra], ea);
    }
    tcg_temp_free(ea);

    return true;
}

static bool do_ldst_D(DisasContext *ctx, arg_D *a, bool update, bool store,
                      MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
                   mop);
}

static bool do_ldst_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
                          bool store, MemOp mop)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return do_ldst_D(ctx, &d, update, store, mop);
}

static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update,
                      bool store, MemOp mop)
{
    return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop);
}

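/*
 * Shared handler for lq/stq and their prefixed forms: a 16-byte access to
 * the even/odd GPR pair rt/rt+1. On implementations without ISA v2.07 LSQ
 * support, the unprefixed forms are privileged and not available in
 * little-endian mode. Under CF_PARALLEL the access goes through the 128-bit
 * atomic helpers when available (otherwise the TB exits to retry with the
 * exclusive lock); in the single-threaded case it is split into two 8-byte
 * accesses.
 */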
static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
{
#if defined(TARGET_PPC64)
    TCGv ea;
    TCGv_i64 low_addr_gpr, high_addr_gpr;
    MemOp mop;

    REQUIRE_INSNS_FLAGS(ctx, 64BX);

    if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
        if (ctx->pr) {
            /* lq and stq were privileged prior to V. 2.07 */
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
            return true;
        }

        if (ctx->le_mode) {
            gen_align_no_le(ctx);
            return true;
        }
    }

    if (!store && unlikely(a->ra == a->rt)) {
        gen_invalid(ctx);
        return true;
    }

    gen_set_access_type(ctx, ACCESS_INT);
    ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));

    if (prefixed || !ctx->le_mode) {
        low_addr_gpr = cpu_gpr[a->rt];
        high_addr_gpr = cpu_gpr[a->rt + 1];
    } else {
        low_addr_gpr = cpu_gpr[a->rt + 1];
        high_addr_gpr = cpu_gpr[a->rt];
    }

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            mop = DEF_MEMOP(MO_128);
            TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx));
            if (store) {
                if (ctx->le_mode) {
                    gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr,
                                               high_addr_gpr, oi);
                } else {
                    gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr,
                                               low_addr_gpr, oi);
                }
            } else {
                if (ctx->le_mode) {
                    gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi);
                    tcg_gen_ld_i64(high_addr_gpr, cpu_env,
                                   offsetof(CPUPPCState, retxh));
                } else {
                    gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi);
                    tcg_gen_ld_i64(low_addr_gpr, cpu_env,
                                   offsetof(CPUPPCState, retxh));
                }
            }
        } else {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    } else {
        mop = DEF_MEMOP(MO_UQ);
        if (store) {
            tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
        } else {
            tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
        }

        gen_addr_add(ctx, ea, ea, 8);

        if (store) {
            tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
        } else {
            tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop);
        }
    }
    tcg_temp_free(ea);
#else
    qemu_build_not_reached();
#endif

    return true;
}

static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }

    return do_ldst_quad(ctx, &d, store, true);
}

/* Load Byte and Zero */
TRANS(LBZ, do_ldst_D, false, false, MO_UB)
TRANS(LBZX, do_ldst_X, false, false, MO_UB)
TRANS(LBZU, do_ldst_D, true, false, MO_UB)
TRANS(LBZUX, do_ldst_X, true, false, MO_UB)
TRANS(PLBZ, do_ldst_PLS_D, false, false, MO_UB)

/* Load Halfword and Zero */
TRANS(LHZ, do_ldst_D, false, false, MO_UW)
TRANS(LHZX, do_ldst_X, false, false, MO_UW)
TRANS(LHZU, do_ldst_D, true, false, MO_UW)
TRANS(LHZUX, do_ldst_X, true, false, MO_UW)
TRANS(PLHZ, do_ldst_PLS_D, false, false, MO_UW)

/* Load Halfword Algebraic */
TRANS(LHA, do_ldst_D, false, false, MO_SW)
TRANS(LHAX, do_ldst_X, false, false, MO_SW)
TRANS(LHAU, do_ldst_D, true, false, MO_SW)
TRANS(LHAXU, do_ldst_X, true, false, MO_SW)
TRANS(PLHA, do_ldst_PLS_D, false, false, MO_SW)

/* Load Word and Zero */
TRANS(LWZ, do_ldst_D, false, false, MO_UL)
TRANS(LWZX, do_ldst_X, false, false, MO_UL)
TRANS(LWZU, do_ldst_D, true, false, MO_UL)
TRANS(LWZUX, do_ldst_X, true, false, MO_UL)
TRANS(PLWZ, do_ldst_PLS_D, false, false, MO_UL)

/* Load Word Algebraic */
TRANS64(LWA, do_ldst_D, false, false, MO_SL)
TRANS64(LWAX, do_ldst_X, false, false, MO_SL)
TRANS64(LWAUX, do_ldst_X, true, false, MO_SL)
TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)

/* Load Doubleword */
TRANS64(LD, do_ldst_D, false, false, MO_UQ)
TRANS64(LDX, do_ldst_X, false, false, MO_UQ)
TRANS64(LDU, do_ldst_D, true, false, MO_UQ)
TRANS64(LDUX, do_ldst_X, true, false, MO_UQ)
TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ)

/* Load Quadword */
TRANS64(LQ, do_ldst_quad, false, false);
TRANS64(PLQ, do_ldst_quad_PLS_D, false);

/* Store Byte */
TRANS(STB, do_ldst_D, false, true, MO_UB)
TRANS(STBX, do_ldst_X, false, true, MO_UB)
TRANS(STBU, do_ldst_D, true, true, MO_UB)
TRANS(STBUX, do_ldst_X, true, true, MO_UB)
TRANS(PSTB, do_ldst_PLS_D, false, true, MO_UB)

/* Store Halfword */
TRANS(STH, do_ldst_D, false, true, MO_UW)
TRANS(STHX, do_ldst_X, false, true, MO_UW)
TRANS(STHU, do_ldst_D, true, true, MO_UW)
TRANS(STHUX, do_ldst_X, true, true, MO_UW)
TRANS(PSTH, do_ldst_PLS_D, false, true, MO_UW)

/* Store Word */
TRANS(STW, do_ldst_D, false, true, MO_UL)
TRANS(STWX, do_ldst_X, false, true, MO_UL)
TRANS(STWU, do_ldst_D, true, true, MO_UL)
TRANS(STWUX, do_ldst_X, true, true, MO_UL)
TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)

/* Store Doubleword */
TRANS64(STD, do_ldst_D, false, true, MO_UQ)
TRANS64(STDX, do_ldst_X, false, true, MO_UQ)
TRANS64(STDU, do_ldst_D, true, true, MO_UQ)
TRANS64(STDUX, do_ldst_X, true, true, MO_UQ)
TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ)

/* Store Quadword */
TRANS64(STQ, do_ldst_quad, true, false);
TRANS64(PSTQ, do_ldst_quad_PLS_D, true);

/*
 * Fixed-Point Compare Instructions
 */

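/*
 * The two helpers below implement CMP/CMPL (X-form, register operand) and
 * CMPI/CMPLI (D-form, immediate operand): 's' selects a signed comparison,
 * and on 64-bit implementations the L field selects a doubleword (L=1) or
 * word (L=0) comparison; the result is written to CR field BF.
 */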
static bool do_cmp_X(DisasContext *ctx, arg_X_bfl *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "" : "L", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], cpu_gpr[a->rb], s, a->bf);
    }
    return true;
}

static bool do_cmp_D(DisasContext *ctx, arg_D_bf *a, bool s)
{
    if ((ctx->insns_flags & PPC_64B) == 0) {
        /*
         * For 32-bit implementations, The Programming Environments Manual says
         * that "the L field must be cleared, otherwise the instruction form is
         * invalid." It seems, however, that most 32-bit CPUs ignore invalid
         * forms (e.g., section "Instruction Formats" of the 405 and 440
         * manuals, "Integer Compare Instructions" of the 601 manual), with the
         * notable exception of the e500 and e500mc, where L=1 was reported to
         * cause an exception.
         */
        if (a->l) {
            if ((ctx->insns_flags2 & PPC2_BOOKE206)) {
                /*
                 * For 32-bit Book E v2.06 implementations (i.e. e500/e500mc),
                 * generate an illegal instruction exception.
                 */
                return false;
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "Invalid form of CMP%s at 0x" TARGET_FMT_lx ", L = 1\n",
                        s ? "I" : "LI", ctx->cia);
            }
        }
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
        return true;
    }

    /* For 64-bit implementations, deal with bit L accordingly. */
    if (a->l) {
        gen_op_cmp(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    } else {
        gen_op_cmp32(cpu_gpr[a->ra], tcg_constant_tl(a->imm), s, a->bf);
    }
    return true;
}

TRANS(CMP, do_cmp_X, true);
TRANS(CMPL, do_cmp_X, false);
TRANS(CMPI, do_cmp_D, true);
TRANS(CMPLI, do_cmp_D, false);

/*
 * Fixed-Point Arithmetic Instructions
 */

static bool trans_ADDI(DisasContext *ctx, arg_D *a)
{
    if (a->ra) {
        tcg_gen_addi_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], a->si);
    } else {
        tcg_gen_movi_tl(cpu_gpr[a->rt], a->si);
    }
    return true;
}

static bool trans_PADDI(DisasContext *ctx, arg_PLS_D *a)
{
    arg_D d;
    if (!resolve_PLS_D(ctx, &d, a)) {
        return true;
    }
    return trans_ADDI(ctx, &d);
}

static bool trans_ADDIS(DisasContext *ctx, arg_D *a)
{
    a->si <<= 16;
    return trans_ADDI(ctx, a);
}

static bool trans_ADDPCIS(DisasContext *ctx, arg_DX *a)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
    tcg_gen_movi_tl(cpu_gpr[a->rt], ctx->base.pc_next + (a->d << 16));
    return true;
}

static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a)
{
    gen_invalid(ctx);
    return true;
}

static bool trans_PNOP(DisasContext *ctx, arg_PNOP *a)
{
    return true;
}

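/*
 * Set Boolean Condition family (ISA v3.1): test CR bit BI and write 0 or 1
 * to RT (SETBC), the inverted test (SETBCR), or the negated results 0/-1
 * for the SETNBC/SETNBCR variants.
 */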
static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    uint32_t mask = 0x08 >> (a->bi & 0x03);
    TCGCond cond = rev ? TCG_COND_EQ : TCG_COND_NE;
    TCGv temp = tcg_temp_new();

    tcg_gen_extu_i32_tl(temp, cpu_crf[a->bi >> 2]);
    tcg_gen_andi_tl(temp, temp, mask);
    tcg_gen_setcondi_tl(cond, cpu_gpr[a->rt], temp, 0);
    if (neg) {
        tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]);
    }
    tcg_temp_free(temp);

    return true;
}

TRANS(SETBC, do_set_bool_cond, false, false)
TRANS(SETBCR, do_set_bool_cond, false, true)
TRANS(SETNBC, do_set_bool_cond, true, false)
TRANS(SETNBCR, do_set_bool_cond, true, true)

static bool trans_CFUGED(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

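/*
 * Shared code generation for CNTLZDM/CNTTZDM: count leading (trail=0) or
 * trailing (trail=1) zeros of src under mask. The position of the first
 * set bit of src & mask is located, the mask is shifted so that only the
 * mask bits beyond that position survive, and those bits are counted with
 * ctpop. If src & mask is zero, the whole mask is counted.
 */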
static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_and_i64(t0, src, mask);
    if (trail) {
        tcg_gen_ctzi_i64(t0, t0, -1);
    } else {
        tcg_gen_clzi_i64(t0, t0, -1);
    }

    tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1);
    tcg_gen_andi_i64(t0, t0, 63);
    tcg_gen_xori_i64(t0, t0, 63);
    if (trail) {
        tcg_gen_shl_i64(t0, mask, t0);
        tcg_gen_shl_i64(t0, t0, t1);
    } else {
        tcg_gen_shr_i64(t0, mask, t0);
        tcg_gen_shr_i64(t0, t0, t1);
    }

    tcg_gen_ctpop_i64(dst, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_PDEPD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}

static bool trans_PEXTD(DisasContext *ctx, arg_X *a)
{
    REQUIRE_64BIT(ctx);
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
    gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
    qemu_build_not_reached();
#endif
    return true;
}