/*
 * AArch64 SVE translation
 *
 * Copyright (c) 2018 Linaro, Ltd
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "translate-a64.h"
#include "fpu/softfloat.h"


typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
                         TCGv_i64, uint32_t, uint32_t);

typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_i32);
typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_ptr, TCGv_i32);

typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv_i64, TCGv_i32);

/*
 * Helpers for extracting complex instruction fields.
 */

/* See e.g. ASR (immediate, predicated).
 * Returns -1 for unallocated encoding; diagnose later.
 */
static int tszimm_esz(DisasContext *s, int x)
{
    x >>= 3;  /* discard imm3 */
    return 31 - clz32(x);
}

static int tszimm_shr(DisasContext *s, int x)
{
    return (16 << tszimm_esz(s, x)) - x;
}

/* See e.g. LSL (immediate, predicated). */
static int tszimm_shl(DisasContext *s, int x)
{
    return x - (8 << tszimm_esz(s, x));
}

/* The SH bit is in bit 8.  Extract the low 8 and shift. */
static inline int expand_imm_sh8s(DisasContext *s, int x)
{
    return (int8_t)x << (x & 0x100 ? 8 : 0);
}

static inline int expand_imm_sh8u(DisasContext *s, int x)
{
    return (uint8_t)x << (x & 0x100 ? 8 : 0);
}

/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
 * with unsigned data.  Cf. SVE Memory Contiguous Load Group.
 */
static inline int msz_dtype(DisasContext *s, int msz)
{
    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
    return dtype[msz];
}
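/*
 * Worked example for the extractors above (informative, derived from
 * the code rather than the architecture text): the tszimm field packs
 * imm3 in bits [2:0] with tsz above it, and the element size is given
 * by the position of tsz's most significant set bit.  For a predicated
 * ASR .H #3 the field value is x == 29:
 *   tszimm_esz: 29 >> 3 == 3, 31 - clz32(3) == 1   -> MO_16
 *   tszimm_shr: (16 << 1) - 29 == 3                -> shift by 3
 * A zero tsz makes tszimm_esz return -1 (clz32(0) == 32), which the
 * callers treat as an unallocated encoding.  Likewise, the dtype
 * values {0, 5, 10, 15} in msz_dtype correspond to the unsigned
 * same-size contiguous loads LD1B, LD1H, LD1W and LD1D.
 */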
/*
 * Include the generated decoder.
 */

#include "decode-sve.c.inc"

/*
 * Implement all of the translator functions referenced by the decoder.
 */

/* Invoke an out-of-line helper on 2 Zregs. */
static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
                            int rd, int rn, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
                             int rd, int rn, int data,
                             ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           status, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
                                 arg_rr_esz *a, int data)
{
    return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data,
                            a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke an out-of-line helper on 3 Zregs. */
static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int rm, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rrr_esz *a, int data)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
}

/* Invoke an out-of-line helper on 3 Zregs, plus float_status. */
static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                              int rd, int rn, int rm,
                              int data, ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           status, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                                  arg_rrr_esz *a, int data)
{
    return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
                             a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke an out-of-line helper on 4 Zregs. */
static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int ra, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrrr_esz *a, int data)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
}

static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrxr_esz *a)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
}
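/*
 * Note the convention shared by all of these expanders: returning
 * false means "unallocated encoding" (fn == NULL) so that the caller
 * can diagnose it, while returning true means the instruction was
 * handled -- including the case where sve_access_check() failed,
 * since the access check itself has already raised an exception.
 */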
/* Invoke an out-of-line helper on 4 Zregs, plus a pointer. */
static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                              int rd, int rn, int rm, int ra,
                              int data, TCGv_ptr ptr)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           ptr, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                               int rd, int rn, int rm, int ra,
                               int data, ARMFPStatusFlavour flavour)
{
    TCGv_ptr status = fpstatus_ptr(flavour);
    bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status);
    return ret;
}

/* Invoke an out-of-line helper on 4 Zregs, 1 Preg, plus fpst. */
static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn,
                                int rd, int rn, int rm, int ra, int pg,
                                int data, ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, data, fn);
    }
    return true;
}

/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rpr_esz *a, int data)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data);
}

static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn,
                                  arg_rpri_esz *a)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
}

static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                              int rd, int rn, int pg, int data,
                              ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zpz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                                  arg_rpr_esz *a, int data,
                                  ARMFPStatusFlavour flavour)
{
    return gen_gvec_fpst_zzp(s, fn, a->rd, a->rn, a->pg, data, flavour);
}
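/*
 * The fpst expanders select the float_status by element size:
 * half-precision elements use FPST_FPCR_F16, whose float_status
 * honours the FPCR.FZ16 flush-to-zero control instead of FZ
 * (see fpstatus_ptr in translate.h).
 */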
/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rprr_esz *a, int data)
{
    return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data);
}

/* Invoke an out-of-line helper on 3 Zregs and a predicate, plus fpst. */
static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                               int rd, int rn, int rm, int pg, int data,
                               ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                                   arg_rprr_esz *a)
{
    return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0,
                              a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke a vector expander on two Zregs and an immediate. */
static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                            int esz, int rd, int rn, uint64_t imm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn), imm, vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                                arg_rri_esz *a)
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm);
}

/* Invoke a vector expander on three Zregs. */
static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int esz, int rd, int rn, int rm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn),
                vec_full_reg_offset(s, rm), vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn,
                                arg_rrr_esz *a)
{
    return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
}

/* Invoke a vector expander on four Zregs. */
static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
                                 arg_rrrr_esz *a)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn),
                vec_full_reg_offset(s, a->rm),
                vec_full_reg_offset(s, a->ra), vsz, vsz);
    }
    return true;
}
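/*
 * Most of the instructions below are wired up via TRANS_FEAT rather
 * than hand-written trans_* functions.  As a rough sketch (the real
 * macro lives in translate.h), a use such as
 *   TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a)
 * expands to approximately
 *   static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a)
 *   {
 *       return dc_isar_feature(aa64_sve, s)
 *              && gen_gvec_fn_arg_zzz(s, tcg_gen_gvec_and, a);
 *   }
 */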
/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn), vsz, vsz);
    }
    return true;
}

/* Initialize a Zreg with replications of a 64-bit immediate. */
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}

/* Invoke a vector expander on three Pregs. */
static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int rd, int rn, int rm)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        gvec_fn(MO_64, pred_full_reg_offset(s, rd),
                pred_full_reg_offset(s, rn),
                pred_full_reg_offset(s, rm), psz, psz);
    }
    return true;
}

/* Invoke a vector move on two Pregs. */
static bool do_mov_p(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
                         pred_full_reg_offset(s, rn), psz, psz);
    }
    return true;
}

/* Set the cpu flags as per a return from an SVE helper. */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* Subroutines computing the ARM PredTest pseudofunction. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
}

static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);

    gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words));

    do_pred_flags(t);
}
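/*
 * Layout of the value consumed by do_pred_flags, as implied by the
 * assignments above and QEMU's flag conventions (Z is considered set
 * when cpu_ZF == 0, C is the value of cpu_CF): bit 31 of t is N,
 * bit 1 is the inverse of Z, bit 0 is C, and V is always zero.
 */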
/* For each element size, the bits within a predicate word that are active. */
const uint64_t pred_esz_masks[5] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull,
    0x0001000100010001ull,
};

static bool trans_INVALID(DisasContext *s, arg_INVALID *a)
{
    unallocated_encoding(s);
    return true;
}

/*
 *** SVE Logical - Unpredicated Group
 */

TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a)
TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a)
TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a)
TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a)

static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_8, 0xff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 8 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
}

static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_16, 0xffff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 16 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
}

static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
{
    tcg_gen_xor_i32(d, n, m);
    tcg_gen_rotri_i32(d, d, sh);
}

static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_rotri_i64(d, d, sh);
}

static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                        TCGv_vec m, int64_t sh)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_rotri_vec(vece, d, d, sh);
}
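/*
 * In gen_xar8_i64/gen_xar16_i64 above, a rotate right by sh within
 * each 8- or 16-bit lane of a 64-bit value is synthesized from two
 * whole-register shifts: the right shift provides each lane's high
 * source bits and the left shift its low source bits, while "mask"
 * (0xff >> sh, replicated per lane) keeps exactly the bits that the
 * right shift produced, so nothing leaks between adjacent lanes.
 */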
void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, int64_t shift,
                  uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3i ops[4] = {
        { .fni8 = gen_xar8_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fni8 = gen_xar16_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_xar_i32,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_xar_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_gvec_xar_d,
          .opt_opc = vecop,
          .vece = MO_64 }
    };
    int esize = 8 << vece;

    /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
    tcg_debug_assert(shift >= 0);
    tcg_debug_assert(shift <= esize);
    shift &= esize - 1;

    if (shift == 0) {
        /* xar with no rotate devolves to xor. */
        tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
                        shift, &ops[vece]);
    }
}

static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
{
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
                     vec_full_reg_offset(s, a->rn),
                     vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
    }
    return true;
}

static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_xor_i64(d, d, k);
}

static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_xor_vec(vece, d, d, k);
}

static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor3_i64,
        .fniv = gen_eor3_vec,
        .fno = gen_helper_sve2_eor3,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_eor3, a)

static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(d, m, k);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_andc_vec(vece, d, m, k);
    tcg_gen_xor_vec(vece, d, d, n);
}

static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bcax_i64,
        .fniv = gen_bcax_vec,
        .fno = gen_helper_sve2_bcax,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bcax, a)
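/*
 * EOR3 and BCAX, like the BSL variants below, are pure bitwise
 * operations, so the expansions can use a fixed .vece of MO_64
 * regardless of the instruction's nominal element size.
 */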
static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                    uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    /* BSL differs from the generic bitsel in argument ordering. */
    tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
}

TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a)

static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(n, k, n);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_or_i64(d, n, m);
}

static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, n, n);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_andc_vec(vece, n, k, n);
        tcg_gen_andc_vec(vece, m, m, k);
        tcg_gen_or_vec(vece, d, n, m);
    }
}

static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl1n_i64,
        .fniv = gen_bsl1n_vec,
        .fno = gen_helper_sve2_bsl1n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a)

static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    /*
     * Z[dn] = (n & k) | (~m & ~k)
     *       =         | ~(m | k)
     */
    tcg_gen_and_i64(n, n, k);
    if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_or_i64(m, m, k);
        tcg_gen_orc_i64(d, n, m);
    } else {
        tcg_gen_nor_i64(m, m, k);
        tcg_gen_or_i64(d, n, m);
    }
}

static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, m, m);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_and_vec(vece, n, n, k);
        tcg_gen_or_vec(vece, m, m, k);
        tcg_gen_orc_vec(vece, d, n, m);
    }
}

static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl2n_i64,
        .fniv = gen_bsl2n_vec,
        .fno = gen_helper_sve2_bsl2n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a)

static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_and_i64(n, n, k);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_nor_i64(d, n, m);
}

static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_bitsel_vec(vece, d, k, n, m);
    tcg_gen_not_vec(vece, d, d);
}

static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_nbsl_i64,
        .fniv = gen_nbsl_vec,
        .fno = gen_helper_sve2_nbsl,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a)
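/*
 * Summary of the select identities implemented above, with k as the
 * control operand (readable directly from the fni8 expansions):
 *   BSL:   d = (n & k) | (m & ~k)
 *   BSL1N: d = (~n & k) | (m & ~k)
 *   BSL2N: d = (n & k) | ~(m | k)
 *   NBSL:  d = ~((n & k) | (m & ~k))
 */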
/*
 *** SVE Integer Arithmetic - Unpredicated Group
 */

TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a)
TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a)
TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a)
TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a)
TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a)
TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a)

/*
 *** SVE Integer Arithmetic - Binary Predicated Group
 */

/* Select active elements from Zn and inactive elements from Zm,
 * storing the result in Zd.
 */
static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
{
    static gen_helper_gvec_4 * const fns[4] = {
        gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
        gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
    };
    return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
}

#define DO_ZPZZ(NAME, FEAT, name) \
    static gen_helper_gvec_4 * const name##_zpzz_fns[4] = {               \
        gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h,           \
        gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d,           \
    };                                                                    \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz,                         \
               name##_zpzz_fns[a->esz], a, 0)

DO_ZPZZ(AND_zpzz, aa64_sve, sve_and)
DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor)
DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr)
DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic)

DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add)
DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub)

DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax)
DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax)
DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin)
DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin)
DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd)
DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd)

DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul)
DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh)
DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh)

DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr)
DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr)
DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl)

static gen_helper_gvec_4 * const sdiv_fns[4] = {
    NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
};
TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const udiv_fns[4] = {
    NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
};
TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0)

TRANS_FEAT(SEL_zpzz, aa64_sve, do_sel_z, a->rd, a->rn, a->rm, a->pg, a->esz)

/*
 *** SVE Integer Arithmetic - Unary Predicated Group
 */

#define DO_ZPZ(NAME, FEAT, name) \
    static gen_helper_gvec_3 * const name##_fns[4] = {  \
        gen_helper_##name##_b, gen_helper_##name##_h,   \
        gen_helper_##name##_s, gen_helper_##name##_d,   \
    };                                                  \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0)

DO_ZPZ(CLS, aa64_sve, sve_cls)
DO_ZPZ(CLZ, aa64_sve, sve_clz)
DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz)
DO_ZPZ(CNOT, aa64_sve, sve_cnot)
DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz)
DO_ZPZ(ABS, aa64_sve, sve_abs)
DO_ZPZ(NEG, aa64_sve, sve_neg)
DO_ZPZ(RBIT, aa64_sve, sve_rbit)

static gen_helper_gvec_3 * const fabs_fns[4] = {
    NULL, gen_helper_sve_fabs_h,
    gen_helper_sve_fabs_s, gen_helper_sve_fabs_d,
};
TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const fneg_fns[4] = {
    NULL, gen_helper_sve_fneg_h,
    gen_helper_sve_fneg_s, gen_helper_sve_fneg_d,
};
TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0)
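/*
 * A NULL entry in one of these helper tables marks an element size
 * the instruction does not provide -- e.g. there is no byte form of
 * FABS/FNEG, and SDIV/UDIV exist only for word and doubleword
 * elements -- so the expander returns false and the encoding is
 * diagnosed as unallocated.
 */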
static gen_helper_gvec_3 * const sxtb_fns[4] = {
    NULL, gen_helper_sve_sxtb_h,
    gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d,
};
TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxtb_fns[4] = {
    NULL, gen_helper_sve_uxtb_h,
    gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d,
};
TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sxth_fns[4] = {
    NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d
};
TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxth_fns[4] = {
    NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d
};
TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, uxth_fns[a->esz], a, 0)

TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0)
TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0)

/*
 *** SVE Integer Reduction Group
 */

typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
                       gen_helper_gvec_reduc *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zn, t_pg;
    TCGv_i32 desc;
    TCGv_i64 temp;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    fn(temp, t_zn, t_pg, desc);

    write_fp_dreg(s, a->rd, temp);
    return true;
}

#define DO_VPZ(NAME, name) \
    static gen_helper_gvec_reduc * const name##_fns[4] = {                \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,             \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,             \
    };                                                                    \
    TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz])

DO_VPZ(ORV, orv)
DO_VPZ(ANDV, andv)
DO_VPZ(EORV, eorv)

DO_VPZ(UADDV, uaddv)
DO_VPZ(SMAXV, smaxv)
DO_VPZ(UMAXV, umaxv)
DO_VPZ(SMINV, sminv)
DO_VPZ(UMINV, uminv)

static gen_helper_gvec_reduc * const saddv_fns[4] = {
    gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
    gen_helper_sve_saddv_s, NULL
};
TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz])

#undef DO_VPZ

/*
 *** SVE Shift by Immediate - Predicated Group
 */

/*
 * Copy Zn into Zd, storing zeros into inactive elements.
 * If invert, store zeros into the active elements.
 */
static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
                        int esz, bool invert)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_movz_b, gen_helper_sve_movz_h,
        gen_helper_sve_movz_s, gen_helper_sve_movz_d,
    };
    return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
}
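/*
 * Concrete illustration of the clamping done below, using byte
 * elements: the element size is 8, and an immediate shift of 8 is a
 * valid encoding.  ASR .B #8 behaves exactly like ASR .B #7 (the
 * sign bit fills the lane either way), while LSR/LSL/ASRD .B #8 zero
 * every active element, implemented via do_movz_zpz with invert set.
 */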
static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr,
                          gen_helper_gvec_3 * const fns[4])
{
    int max;

    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }

    /*
     * Shift by element size is architecturally valid.
     * For arithmetic right-shift, it's the same as by one less.
     * For logical shifts and ASRD, it is a zeroing operation.
     */
    max = 8 << a->esz;
    if (a->imm >= max) {
        if (asr) {
            a->imm = max - 1;
        } else {
            return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
        }
    }
    return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a);
}

static gen_helper_gvec_3 * const asr_zpzi_fns[4] = {
    gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
    gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
};
TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns)

static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = {
    gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
    gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
};
TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns)

static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = {
    gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
    gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
};
TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns)

static gen_helper_gvec_3 * const asrd_fns[4] = {
    gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
    gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
};
TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns)

static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = {
    gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
    gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
};
TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = {
    gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
    gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
};
TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const srshr_fns[4] = {
    gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
    gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
};
TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : srshr_fns[a->esz], a)

static gen_helper_gvec_3 * const urshr_fns[4] = {
    gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
    gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
};
TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : urshr_fns[a->esz], a)

static gen_helper_gvec_3 * const sqshlu_fns[4] = {
    gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
    gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
};
TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshlu_fns[a->esz], a)

/*
 *** SVE Bitwise Shift - Predicated Group
 */
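/*
 * The _zpzw forms below shift each element by the overlapping 64-bit
 * (wide) element of Zm.  A doubleword shifted by a doubleword is just
 * the ordinary _zpzz operation, so these tables have no _d entry.
 */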
#define DO_ZPZW(NAME, name) \
    static gen_helper_gvec_4 * const name##_zpzw_fns[4] = {               \
        gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h,   \
        gen_helper_sve_##name##_zpzw_s, NULL                              \
    };                                                                    \
    TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz,              \
               a->esz < 0 ? NULL : name##_zpzw_fns[a->esz], a, 0)

DO_ZPZW(ASR, asr)
DO_ZPZW(LSR, lsr)
DO_ZPZW(LSL, lsl)

#undef DO_ZPZW

/*
 *** SVE Bitwise Shift - Unpredicated Group
 */

static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
                         void (*gvec_fn)(unsigned, uint32_t, uint32_t,
                                         int64_t, uint32_t, uint32_t))
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        /*
         * Shift by element size is architecturally valid.  For
         * arithmetic right-shift, it's the same as by one less.
         * Otherwise it is a zeroing operation.
         */
        if (a->imm >= 8 << a->esz) {
            if (asr) {
                a->imm = (8 << a->esz) - 1;
            } else {
                do_dupi_z(s, a->rd, 0);
                return true;
            }
        }
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}

TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari)
TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri)
TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli)

#define DO_ZZW(NAME, name) \
    static gen_helper_gvec_3 * const name##_zzw_fns[4] = {                \
        gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h,     \
        gen_helper_sve_##name##_zzw_s, NULL                               \
    };                                                                    \
    TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz,                      \
               name##_zzw_fns[a->esz], a, 0)

DO_ZZW(ASR_zzw, asr)
DO_ZZW(LSR_zzw, lsr)
DO_ZZW(LSL_zzw, lsl)

#undef DO_ZZW

/*
 *** SVE Integer Multiply-Add Group
 */

static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
                         gen_helper_gvec_5 *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->ra),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           vsz, vsz, 0, fn);
    }
    return true;
}

static gen_helper_gvec_5 * const mla_fns[4] = {
    gen_helper_sve_mla_b, gen_helper_sve_mla_h,
    gen_helper_sve_mla_s, gen_helper_sve_mla_d,
};
TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz])

static gen_helper_gvec_5 * const mls_fns[4] = {
    gen_helper_sve_mls_b, gen_helper_sve_mls_h,
    gen_helper_sve_mls_s, gen_helper_sve_mls_d,
};
TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz])

/*
 *** SVE Index Generation Group
 */
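/*
 * INDEX writes Zd.e[i] = start + i * incr for each element.  For
 * example, INDEX Z0.S, #1, #2 (both operands immediate) produces
 * 1, 3, 5, ... across however many 32-bit elements the current
 * vector length holds.
 */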
static bool do_index(DisasContext *s, int esz, int rd,
                     TCGv_i64 start, TCGv_i64 incr)
{
    unsigned vsz;
    TCGv_i32 desc;
    TCGv_ptr t_zd;

    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    t_zd = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    if (esz == 3) {
        gen_helper_sve_index_d(t_zd, start, incr, desc);
    } else {
        typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
        static index_fn * const fns[3] = {
            gen_helper_sve_index_b,
            gen_helper_sve_index_h,
            gen_helper_sve_index_s,
        };
        TCGv_i32 s32 = tcg_temp_new_i32();
        TCGv_i32 i32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(s32, start);
        tcg_gen_extrl_i64_i32(i32, incr);
        fns[esz](t_zd, s32, i32, desc);
    }
    return true;
}

TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2))
TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm), cpu_reg(s, a->rm))
TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), tcg_constant_i64(a->imm))
TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), cpu_reg(s, a->rm))

/*
 *** SVE Stack Allocation Group
 */

static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
    }
    return true;
}

static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_enabled_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
    }
    return true;
}

static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
    }
    return true;
}

static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_enabled_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
    }
    return true;
}

static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
    }
    return true;
}

static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_enabled_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
    }
    return true;
}

/*
 *** SVE Compute Vector Address Group
 */

static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
}

TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)

/*
 *** SVE Integer Misc - Unpredicated Group
 */

static gen_helper_gvec_2 * const fexpa_fns[4] = {
    NULL, gen_helper_sve_fexpa_h,
    gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
};
TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
                        fexpa_fns[a->esz], a->rd, a->rn, 0)
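/*
 * TRANS_FEAT_NONSTREAMING additionally marks the instruction illegal
 * while the PE is in SME streaming mode: ADR, FEXPA and FTSSEL are
 * among the SVE instructions that FEAT_SME does not make available
 * when streaming.
 */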
static gen_helper_gvec_3 * const ftssel_fns[4] = {
    NULL, gen_helper_sve_ftssel_h,
    gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
};
TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
                        ftssel_fns[a->esz], a, 0)

/*
 *** SVE Predicate Logical Operations Group
 */

static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
                          const GVecGen4 *gvec_op)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned psz = pred_gvec_reg_size(s);
    int dofs = pred_full_reg_offset(s, a->rd);
    int nofs = pred_full_reg_offset(s, a->rn);
    int mofs = pred_full_reg_offset(s, a->rm);
    int gofs = pred_full_reg_offset(s, a->pg);

    if (!a->s) {
        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        return true;
    }

    if (psz == 8) {
        /* Do the operation and the flags generation in temps. */
        TCGv_i64 pd = tcg_temp_new_i64();
        TCGv_i64 pn = tcg_temp_new_i64();
        TCGv_i64 pm = tcg_temp_new_i64();
        TCGv_i64 pg = tcg_temp_new_i64();

        tcg_gen_ld_i64(pn, cpu_env, nofs);
        tcg_gen_ld_i64(pm, cpu_env, mofs);
        tcg_gen_ld_i64(pg, cpu_env, gofs);

        gvec_op->fni8(pd, pn, pm, pg);
        tcg_gen_st_i64(pd, cpu_env, dofs);

        do_predtest1(pd, pg);
    } else {
        /* The operation and flags generation is large.  The computation
         * of the flags depends on the original contents of the guarding
         * predicate.  If the destination overwrites the guarding predicate,
         * then the easiest way to get this right is to save a copy.
         */
        int tofs = gofs;
        if (a->rd == a->pg) {
            tofs = offsetof(CPUARMState, vfp.preg_tmp);
            tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
        }

        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        do_predtest(s, dofs, tofs, psz / 8);
    }
    return true;
}

static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_and_pg_i64,
        .fniv = gen_and_pg_vec,
        .fno = gen_helper_sve_and_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (!a->s) {
        if (a->rn == a->rm) {
            if (a->pg == a->rn) {
                return do_mov_p(s, a->rd, a->rn);
            }
            return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
        } else if (a->pg == a->rn || a->pg == a->rm) {
            return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
        }
    }
    return do_pppp_flags(s, a, &op);
}
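/*
 * In the arg_rprr_s argument sets, a->s selects the flag-setting
 * forms (ANDS, BICS, ...), for which do_pppp_flags runs PredTest on
 * the result; the !a->s fast paths in these trans functions therefore
 * apply only to the non-flag-setting forms.
 */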
static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_andc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_andc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_bic_pg_i64,
        .fniv = gen_bic_pg_vec,
        .fno = gen_helper_sve_bic_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (!a->s && a->pg == a->rn) {
        return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_xor_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_xor_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor_pg_i64,
        .fniv = gen_eor_pg_vec,
        .fno = gen_helper_sve_eor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    /* Alias NOT (predicate) is EOR Pd.B, Pg/Z, Pn.B, Pg.B */
    if (!a->s && a->pg == a->rm) {
        return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}

static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
{
    if (a->s || !dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
                            pred_full_reg_offset(s, a->pg),
                            pred_full_reg_offset(s, a->rn),
                            pred_full_reg_offset(s, a->rm), psz, psz);
    }
    return true;
}

static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orr_pg_i64,
        .fniv = gen_orr_pg_vec,
        .fno = gen_helper_sve_orr_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (!a->s && a->pg == a->rn && a->rn == a->rm) {
        return do_mov_p(s, a->rd, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_orc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_orc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orn_pg_i64,
        .fniv = gen_orn_pg_vec,
        .fno = gen_helper_sve_orn_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    return do_pppp_flags(s, a, &op);
}
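/*
 * NOR and NAND below have no single-operation fast path; both are
 * expressed relative to the governing predicate as
 *   NOR:  pd = pg & ~(pn | pm)
 *   NAND: pd = pg & ~(pn & pm)
 */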
static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nor_pg_i64,
        .fniv = gen_nor_pg_vec,
        .fno = gen_helper_sve_nor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                            TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nand_pg_i64,
        .fniv = gen_nand_pg_vec,
        .fno = gen_helper_sve_nand_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    return do_pppp_flags(s, a, &op);
}

/*
 *** SVE Predicate Misc Group
 */

static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        int nofs = pred_full_reg_offset(s, a->rn);
        int gofs = pred_full_reg_offset(s, a->pg);
        int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);

        if (words == 1) {
            TCGv_i64 pn = tcg_temp_new_i64();
            TCGv_i64 pg = tcg_temp_new_i64();

            tcg_gen_ld_i64(pn, cpu_env, nofs);
            tcg_gen_ld_i64(pg, cpu_env, gofs);
            do_predtest1(pn, pg);
        } else {
            do_predtest(s, nofs, gofs, words);
        }
    }
    return true;
}

/* See the ARM pseudocode DecodePredCount. */
static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
{
    unsigned elements = fullsz >> esz;
    unsigned bound;

    switch (pattern) {
    case 0x0: /* POW2 */
        return pow2floor(elements);
    case 0x1: /* VL1 */
    case 0x2: /* VL2 */
    case 0x3: /* VL3 */
    case 0x4: /* VL4 */
    case 0x5: /* VL5 */
    case 0x6: /* VL6 */
    case 0x7: /* VL7 */
    case 0x8: /* VL8 */
        bound = pattern;
        break;
    case 0x9: /* VL16 */
    case 0xa: /* VL32 */
    case 0xb: /* VL64 */
    case 0xc: /* VL128 */
    case 0xd: /* VL256 */
        bound = 16 << (pattern - 9);
        break;
    case 0x1d: /* MUL4 */
        return elements - elements % 4;
    case 0x1e: /* MUL3 */
        return elements - elements % 3;
    case 0x1f: /* ALL */
        return elements;
    default:   /* #uimm5 */
        return 0;
    }
    return elements >= bound ? bound : 0;
}
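/*
 * Worked example for decode_pred_count (values derived from the
 * code): with a 256-bit vector, fullsz == 32, so halfword elements
 * give elements == 16.  Then POW2 -> 16, VL7 -> 7, MUL3 -> 15,
 * ALL -> 16, and VL32 -> 0, since a pattern larger than the vector
 * selects no elements.
 */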
/* This handles all of the predicate initialization instructions,
 * PTRUE, PFALSE, SETFFR.  For PFALSE, we will have set PAT == 32
 * so that decode_pred_count returns 0.  For SETFFR, we will have
 * set RD == 16 == FFR.
 */
static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned ofs = pred_full_reg_offset(s, rd);
    unsigned numelem, setsz, i;
    uint64_t word, lastword;
    TCGv_i64 t;

    numelem = decode_pred_count(fullsz, pat, esz);

    /* Determine what we must store into each bit, and how many. */
    if (numelem == 0) {
        lastword = word = 0;
        setsz = fullsz;
    } else {
        setsz = numelem << esz;
        lastword = word = pred_esz_masks[esz];
        if (setsz % 64) {
            lastword &= MAKE_64BIT_MASK(0, setsz % 64);
        }
    }

    t = tcg_temp_new_i64();
    if (fullsz <= 64) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs);
        goto done;
    }

    if (word == lastword) {
        unsigned maxsz = size_for_gvec(fullsz / 8);
        unsigned oprsz = size_for_gvec(setsz / 8);

        if (oprsz * 8 == setsz) {
            tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
            goto done;
        }
    }

    setsz /= 8;
    fullsz /= 8;

    tcg_gen_movi_i64(t, word);
    for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
        tcg_gen_st_i64(t, cpu_env, ofs + i);
    }
    if (lastword != word) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs + i);
        i += 8;
    }
    if (i < fullsz) {
        tcg_gen_movi_i64(t, 0);
        for (; i < fullsz; i += 8) {
            tcg_gen_st_i64(t, cpu_env, ofs + i);
        }
    }

 done:
    /* PTRUES */
    if (setflag) {
        tcg_gen_movi_i32(cpu_NF, -(word != 0));
        tcg_gen_movi_i32(cpu_CF, word == 0);
        tcg_gen_movi_i32(cpu_VF, 0);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    }
    return true;
}

TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)

/* Note pat == 31 is #all, to set all elements. */
TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
                        do_predset, 0, FFR_PRED_NUM, 31, false)

/* Note pat == 32 is #unimp, to set no elements. */
TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)
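/*
 * FFR_PRED_NUM is 16: QEMU stores the first-fault register as one
 * slot beyond the sixteen architectural predicate registers, which
 * lets SETFFR above and RDFFR/WRFFR below reuse the ordinary
 * predicate set and move machinery.
 */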
static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
{
    /* The path through do_pppp_flags is complicated enough to want to avoid
     * duplication.  Frob the arguments into the form of a predicated AND.
     */
    arg_rprr_s alt_a = {
        .rd = a->rd, .pg = a->pg, .s = a->s,
        .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
    };

    s->is_nonstreaming = true;
    return trans_AND_pppp(s, &alt_a);
}

TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)

static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                            void (*gen_fn)(TCGv_i32, TCGv_ptr,
                                           TCGv_ptr, TCGv_i32))
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGv_ptr t_pd = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_i32 t;
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
    t = tcg_temp_new_i32();

    gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc));

    do_pred_flags(t);
    return true;
}

TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst)
TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext)

/*
 *** SVE Element Count Group
 */

/* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register.  The second operand is known to be positive,
 * which halves the comparisons we must perform to bound the result.
 */
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    int64_t ibound;

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (u) {
        tcg_gen_ext32u_i64(reg, reg);
    } else {
        tcg_gen_ext32s_i64(reg, reg);
    }
    if (d) {
        tcg_gen_sub_i64(reg, reg, val);
        ibound = (u ? 0 : INT32_MIN);
        tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound));
    } else {
        tcg_gen_add_i64(reg, reg, val);
        ibound = (u ? UINT32_MAX : INT32_MAX);
        tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound));
    }
}

/* Similarly with 64-bit values. */
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t2;

    if (u) {
        if (d) {
            tcg_gen_sub_i64(t0, reg, val);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
        } else {
            tcg_gen_add_i64(t0, reg, val);
            t2 = tcg_constant_i64(-1);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
        }
    } else {
        TCGv_i64 t1 = tcg_temp_new_i64();
        if (d) {
            /* Detect signed overflow for subtraction. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_sub_i64(t1, reg, val);
            tcg_gen_xor_i64(reg, reg, t1);
            tcg_gen_and_i64(t0, t0, reg);

            /* Bound the result. */
            tcg_gen_movi_i64(reg, INT64_MIN);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
        } else {
            /* Detect signed overflow for addition. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_add_i64(reg, reg, val);
            tcg_gen_xor_i64(t1, reg, val);
            tcg_gen_andc_i64(t0, t1, t0);

            /* Bound the result. */
            tcg_gen_movi_i64(t1, INT64_MAX);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
        }
    }
}
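/*
 * The signed cases above use the classic XOR overflow test: for
 * subtraction, overflow occurred iff the operands had different signs
 * and the result's sign differs from the minuend, so
 * (reg ^ val) & (reg ^ t1) has its top bit set exactly on overflow;
 * the movcond on "t0 < 0" then substitutes the saturated bound for
 * the raw result.
 */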
*/ 1923 static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, 1924 TCGv_i64 val, bool u, bool d) 1925 { 1926 unsigned vsz = vec_full_reg_size(s); 1927 TCGv_ptr dptr, nptr; 1928 TCGv_i32 t32, desc; 1929 TCGv_i64 t64; 1930 1931 dptr = tcg_temp_new_ptr(); 1932 nptr = tcg_temp_new_ptr(); 1933 tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd)); 1934 tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn)); 1935 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 1936 1937 switch (esz) { 1938 case MO_8: 1939 t32 = tcg_temp_new_i32(); 1940 tcg_gen_extrl_i64_i32(t32, val); 1941 if (d) { 1942 tcg_gen_neg_i32(t32, t32); 1943 } 1944 if (u) { 1945 gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc); 1946 } else { 1947 gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc); 1948 } 1949 break; 1950 1951 case MO_16: 1952 t32 = tcg_temp_new_i32(); 1953 tcg_gen_extrl_i64_i32(t32, val); 1954 if (d) { 1955 tcg_gen_neg_i32(t32, t32); 1956 } 1957 if (u) { 1958 gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc); 1959 } else { 1960 gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc); 1961 } 1962 break; 1963 1964 case MO_32: 1965 t64 = tcg_temp_new_i64(); 1966 if (d) { 1967 tcg_gen_neg_i64(t64, val); 1968 } else { 1969 tcg_gen_mov_i64(t64, val); 1970 } 1971 if (u) { 1972 gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc); 1973 } else { 1974 gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc); 1975 } 1976 break; 1977 1978 case MO_64: 1979 if (u) { 1980 if (d) { 1981 gen_helper_sve_uqsubi_d(dptr, nptr, val, desc); 1982 } else { 1983 gen_helper_sve_uqaddi_d(dptr, nptr, val, desc); 1984 } 1985 } else if (d) { 1986 t64 = tcg_temp_new_i64(); 1987 tcg_gen_neg_i64(t64, val); 1988 gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc); 1989 } else { 1990 gen_helper_sve_sqaddi_d(dptr, nptr, val, desc); 1991 } 1992 break; 1993 1994 default: 1995 g_assert_not_reached(); 1996 } 1997 } 1998 1999 static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) 2000 { 2001 if (!dc_isar_feature(aa64_sve, s)) { 2002 return false; 2003 } 2004 if (sve_access_check(s)) { 2005 unsigned fullsz = vec_full_reg_size(s); 2006 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 2007 tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm); 2008 } 2009 return true; 2010 } 2011 2012 static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a) 2013 { 2014 if (!dc_isar_feature(aa64_sve, s)) { 2015 return false; 2016 } 2017 if (sve_access_check(s)) { 2018 unsigned fullsz = vec_full_reg_size(s); 2019 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 2020 int inc = numelem * a->imm * (a->d ? -1 : 1); 2021 TCGv_i64 reg = cpu_reg(s, a->rd); 2022 2023 tcg_gen_addi_i64(reg, reg, inc); 2024 } 2025 return true; 2026 } 2027 2028 static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a) 2029 { 2030 if (!dc_isar_feature(aa64_sve, s)) { 2031 return false; 2032 } 2033 if (!sve_access_check(s)) { 2034 return true; 2035 } 2036 2037 unsigned fullsz = vec_full_reg_size(s); 2038 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 2039 int inc = numelem * a->imm; 2040 TCGv_i64 reg = cpu_reg(s, a->rd); 2041 2042 /* Use normal 64-bit arithmetic to detect 32-bit overflow. 
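* When inc == 0 there is nothing to add, but the 32-bit extension of the * register must still be performed; otherwise do_sat_addsub_32 performs * that extension itself before saturating.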
*/ 2043 if (inc == 0) { 2044 if (a->u) { 2045 tcg_gen_ext32u_i64(reg, reg); 2046 } else { 2047 tcg_gen_ext32s_i64(reg, reg); 2048 } 2049 } else { 2050 do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d); 2051 } 2052 return true; 2053 } 2054 2055 static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a) 2056 { 2057 if (!dc_isar_feature(aa64_sve, s)) { 2058 return false; 2059 } 2060 if (!sve_access_check(s)) { 2061 return true; 2062 } 2063 2064 unsigned fullsz = vec_full_reg_size(s); 2065 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 2066 int inc = numelem * a->imm; 2067 TCGv_i64 reg = cpu_reg(s, a->rd); 2068 2069 if (inc != 0) { 2070 do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d); 2071 } 2072 return true; 2073 } 2074 2075 static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) 2076 { 2077 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2078 return false; 2079 } 2080 2081 unsigned fullsz = vec_full_reg_size(s); 2082 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 2083 int inc = numelem * a->imm; 2084 2085 if (inc != 0) { 2086 if (sve_access_check(s)) { 2087 tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd), 2088 vec_full_reg_offset(s, a->rn), 2089 tcg_constant_i64(a->d ? -inc : inc), 2090 fullsz, fullsz); 2091 } 2092 } else { 2093 do_mov_z(s, a->rd, a->rn); 2094 } 2095 return true; 2096 } 2097 2098 static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a) 2099 { 2100 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2101 return false; 2102 } 2103 2104 unsigned fullsz = vec_full_reg_size(s); 2105 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 2106 int inc = numelem * a->imm; 2107 2108 if (inc != 0) { 2109 if (sve_access_check(s)) { 2110 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, 2111 tcg_constant_i64(inc), a->u, a->d); 2112 } 2113 } else { 2114 do_mov_z(s, a->rd, a->rn); 2115 } 2116 return true; 2117 } 2118 2119 /* 2120 *** SVE Bitwise Immediate Group 2121 */ 2122 2123 static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn) 2124 { 2125 uint64_t imm; 2126 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), 2127 extract32(a->dbm, 0, 6), 2128 extract32(a->dbm, 6, 6))) { 2129 return false; 2130 } 2131 return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm); 2132 } 2133 2134 TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi) 2135 TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori) 2136 TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori) 2137 2138 static bool trans_DUPM(DisasContext *s, arg_DUPM *a) 2139 { 2140 uint64_t imm; 2141 2142 if (!dc_isar_feature(aa64_sve, s)) { 2143 return false; 2144 } 2145 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), 2146 extract32(a->dbm, 0, 6), 2147 extract32(a->dbm, 6, 6))) { 2148 return false; 2149 } 2150 if (sve_access_check(s)) { 2151 do_dupi_z(s, a->rd, imm); 2152 } 2153 return true; 2154 } 2155 2156 /* 2157 *** SVE Integer Wide Immediate - Predicated Group 2158 */ 2159 2160 /* Implement all merging copies. This is used for CPY (immediate), 2161 * FCPY, CPY (scalar), CPY (SIMD&FP scalar). 
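* Active elements of ZD are set to VAL; inactive elements are copied * from ZN.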
2162 */ 2163 static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, 2164 TCGv_i64 val) 2165 { 2166 typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); 2167 static gen_cpy * const fns[4] = { 2168 gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h, 2169 gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d, 2170 }; 2171 unsigned vsz = vec_full_reg_size(s); 2172 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 2173 TCGv_ptr t_zd = tcg_temp_new_ptr(); 2174 TCGv_ptr t_zn = tcg_temp_new_ptr(); 2175 TCGv_ptr t_pg = tcg_temp_new_ptr(); 2176 2177 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd)); 2178 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn)); 2179 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); 2180 2181 fns[esz](t_zd, t_zn, t_pg, val, desc); 2182 } 2183 2184 static bool trans_FCPY(DisasContext *s, arg_FCPY *a) 2185 { 2186 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2187 return false; 2188 } 2189 if (sve_access_check(s)) { 2190 /* Decode the VFP immediate. */ 2191 uint64_t imm = vfp_expand_imm(a->esz, a->imm); 2192 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm)); 2193 } 2194 return true; 2195 } 2196 2197 static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a) 2198 { 2199 if (!dc_isar_feature(aa64_sve, s)) { 2200 return false; 2201 } 2202 if (sve_access_check(s)) { 2203 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm)); 2204 } 2205 return true; 2206 } 2207 2208 static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a) 2209 { 2210 static gen_helper_gvec_2i * const fns[4] = { 2211 gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h, 2212 gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d, 2213 }; 2214 2215 if (!dc_isar_feature(aa64_sve, s)) { 2216 return false; 2217 } 2218 if (sve_access_check(s)) { 2219 unsigned vsz = vec_full_reg_size(s); 2220 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), 2221 pred_full_reg_offset(s, a->pg), 2222 tcg_constant_i64(a->imm), 2223 vsz, vsz, 0, fns[a->esz]); 2224 } 2225 return true; 2226 } 2227 2228 /* 2229 *** SVE Permute Extract Group 2230 */ 2231 2232 static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm) 2233 { 2234 if (!sve_access_check(s)) { 2235 return true; 2236 } 2237 2238 unsigned vsz = vec_full_reg_size(s); 2239 unsigned n_ofs = imm >= vsz ? 0 : imm; 2240 unsigned n_siz = vsz - n_ofs; 2241 unsigned d = vec_full_reg_offset(s, rd); 2242 unsigned n = vec_full_reg_offset(s, rn); 2243 unsigned m = vec_full_reg_offset(s, rm); 2244 2245 /* Use host vector move insns if we have appropriate sizes 2246 * and no unfortunate overlap. 
2247 */ 2248 if (m != d 2249 && n_ofs == size_for_gvec(n_ofs) 2250 && n_siz == size_for_gvec(n_siz) 2251 && (d != n || n_siz <= n_ofs)) { 2252 tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz); 2253 if (n_ofs != 0) { 2254 tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs); 2255 } 2256 } else { 2257 tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext); 2258 } 2259 return true; 2260 } 2261 2262 TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm) 2263 TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm) 2264 2265 /* 2266 *** SVE Permute - Unpredicated Group 2267 */ 2268 2269 static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a) 2270 { 2271 if (!dc_isar_feature(aa64_sve, s)) { 2272 return false; 2273 } 2274 if (sve_access_check(s)) { 2275 unsigned vsz = vec_full_reg_size(s); 2276 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd), 2277 vsz, vsz, cpu_reg_sp(s, a->rn)); 2278 } 2279 return true; 2280 } 2281 2282 static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a) 2283 { 2284 if (!dc_isar_feature(aa64_sve, s)) { 2285 return false; 2286 } 2287 if ((a->imm & 0x1f) == 0) { 2288 return false; 2289 } 2290 if (sve_access_check(s)) { 2291 unsigned vsz = vec_full_reg_size(s); 2292 unsigned dofs = vec_full_reg_offset(s, a->rd); 2293 unsigned esz, index; 2294 2295 esz = ctz32(a->imm); 2296 index = a->imm >> (esz + 1); 2297 2298 if ((index << esz) < vsz) { 2299 unsigned nofs = vec_reg_offset(s, a->rn, index, esz); 2300 tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz); 2301 } else { 2302 /* 2303 * While dup_mem handles 128-bit elements, dup_imm does not. 2304 * Thankfully element size doesn't matter for splatting zero. 2305 */ 2306 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0); 2307 } 2308 } 2309 return true; 2310 } 2311 2312 static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) 2313 { 2314 typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); 2315 static gen_insr * const fns[4] = { 2316 gen_helper_sve_insr_b, gen_helper_sve_insr_h, 2317 gen_helper_sve_insr_s, gen_helper_sve_insr_d, 2318 }; 2319 unsigned vsz = vec_full_reg_size(s); 2320 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 2321 TCGv_ptr t_zd = tcg_temp_new_ptr(); 2322 TCGv_ptr t_zn = tcg_temp_new_ptr(); 2323 2324 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd)); 2325 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn)); 2326 2327 fns[a->esz](t_zd, t_zn, val, desc); 2328 } 2329 2330 static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) 2331 { 2332 if (!dc_isar_feature(aa64_sve, s)) { 2333 return false; 2334 } 2335 if (sve_access_check(s)) { 2336 TCGv_i64 t = tcg_temp_new_i64(); 2337 tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64)); 2338 do_insr_i64(s, a, t); 2339 } 2340 return true; 2341 } 2342 2343 static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a) 2344 { 2345 if (!dc_isar_feature(aa64_sve, s)) { 2346 return false; 2347 } 2348 if (sve_access_check(s)) { 2349 do_insr_i64(s, a, cpu_reg(s, a->rm)); 2350 } 2351 return true; 2352 } 2353 2354 static gen_helper_gvec_2 * const rev_fns[4] = { 2355 gen_helper_sve_rev_b, gen_helper_sve_rev_h, 2356 gen_helper_sve_rev_s, gen_helper_sve_rev_d 2357 }; 2358 TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0) 2359 2360 static gen_helper_gvec_3 * const sve_tbl_fns[4] = { 2361 gen_helper_sve_tbl_b, gen_helper_sve_tbl_h, 2362 gen_helper_sve_tbl_s, gen_helper_sve_tbl_d 2363 }; 2364 TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 
0) 2365 2366 static gen_helper_gvec_4 * const sve2_tbl_fns[4] = { 2367 gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h, 2368 gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d 2369 }; 2370 TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz], 2371 a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0) 2372 2373 static gen_helper_gvec_3 * const tbx_fns[4] = { 2374 gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h, 2375 gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d 2376 }; 2377 TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0) 2378 2379 static bool trans_UNPK(DisasContext *s, arg_UNPK *a) 2380 { 2381 static gen_helper_gvec_2 * const fns[4][2] = { 2382 { NULL, NULL }, 2383 { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h }, 2384 { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s }, 2385 { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d }, 2386 }; 2387 2388 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2389 return false; 2390 } 2391 if (sve_access_check(s)) { 2392 unsigned vsz = vec_full_reg_size(s); 2393 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd), 2394 vec_full_reg_offset(s, a->rn) 2395 + (a->h ? vsz / 2 : 0), 2396 vsz, vsz, 0, fns[a->esz][a->u]); 2397 } 2398 return true; 2399 } 2400 2401 /* 2402 *** SVE Permute - Predicates Group 2403 */ 2404 2405 static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, 2406 gen_helper_gvec_3 *fn) 2407 { 2408 if (!sve_access_check(s)) { 2409 return true; 2410 } 2411 2412 unsigned vsz = pred_full_reg_size(s); 2413 2414 TCGv_ptr t_d = tcg_temp_new_ptr(); 2415 TCGv_ptr t_n = tcg_temp_new_ptr(); 2416 TCGv_ptr t_m = tcg_temp_new_ptr(); 2417 uint32_t desc = 0; 2418 2419 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); 2420 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 2421 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); 2422 2423 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd)); 2424 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn)); 2425 tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm)); 2426 2427 fn(t_d, t_n, t_m, tcg_constant_i32(desc)); 2428 return true; 2429 } 2430 2431 static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, 2432 gen_helper_gvec_2 *fn) 2433 { 2434 if (!sve_access_check(s)) { 2435 return true; 2436 } 2437 2438 unsigned vsz = pred_full_reg_size(s); 2439 TCGv_ptr t_d = tcg_temp_new_ptr(); 2440 TCGv_ptr t_n = tcg_temp_new_ptr(); 2441 uint32_t desc = 0; 2442 2443 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd)); 2444 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn)); 2445 2446 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); 2447 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 2448 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); 2449 2450 fn(t_d, t_n, tcg_constant_i32(desc)); 2451 return true; 2452 } 2453 2454 TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p) 2455 TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p) 2456 TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p) 2457 TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p) 2458 TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p) 2459 TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p) 2460 2461 TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p) 2462 TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p) 2463 TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p) 2464 2465 /* 2466 *** SVE 
Permute - Interleaving Group 2467 */ 2468 2469 static gen_helper_gvec_3 * const zip_fns[4] = { 2470 gen_helper_sve_zip_b, gen_helper_sve_zip_h, 2471 gen_helper_sve_zip_s, gen_helper_sve_zip_d, 2472 }; 2473 TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2474 zip_fns[a->esz], a, 0) 2475 TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2476 zip_fns[a->esz], a, vec_full_reg_size(s) / 2) 2477 2478 TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2479 gen_helper_sve2_zip_q, a, 0) 2480 TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2481 gen_helper_sve2_zip_q, a, 2482 QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2) 2483 2484 static gen_helper_gvec_3 * const uzp_fns[4] = { 2485 gen_helper_sve_uzp_b, gen_helper_sve_uzp_h, 2486 gen_helper_sve_uzp_s, gen_helper_sve_uzp_d, 2487 }; 2488 2489 TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2490 uzp_fns[a->esz], a, 0) 2491 TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2492 uzp_fns[a->esz], a, 1 << a->esz) 2493 2494 TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2495 gen_helper_sve2_uzp_q, a, 0) 2496 TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2497 gen_helper_sve2_uzp_q, a, 16) 2498 2499 static gen_helper_gvec_3 * const trn_fns[4] = { 2500 gen_helper_sve_trn_b, gen_helper_sve_trn_h, 2501 gen_helper_sve_trn_s, gen_helper_sve_trn_d, 2502 }; 2503 2504 TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2505 trn_fns[a->esz], a, 0) 2506 TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2507 trn_fns[a->esz], a, 1 << a->esz) 2508 2509 TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2510 gen_helper_sve2_trn_q, a, 0) 2511 TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2512 gen_helper_sve2_trn_q, a, 16) 2513 2514 /* 2515 *** SVE Permute Vector - Predicated Group 2516 */ 2517 2518 static gen_helper_gvec_3 * const compact_fns[4] = { 2519 NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d 2520 }; 2521 TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, 2522 compact_fns[a->esz], a, 0) 2523 2524 /* Call the helper that computes the ARM LastActiveElement pseudocode 2525 * function, scaled by the element size. This includes the not found 2526 * indication; e.g. not found for esz=3 is -8. 2527 */ 2528 static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) 2529 { 2530 /* Predicate sizes may be smaller and cannot use simd_desc. We cannot 2531 * round up, as we do elsewhere, because we need the exact size. 2532 */ 2533 TCGv_ptr t_p = tcg_temp_new_ptr(); 2534 unsigned desc = 0; 2535 2536 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s)); 2537 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); 2538 2539 tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg)); 2540 2541 gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc)); 2542 } 2543 2544 /* Increment LAST to the offset of the next element in the vector, 2545 * wrapping around to 0. 2546 */ 2547 static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz) 2548 { 2549 unsigned vsz = vec_full_reg_size(s); 2550 2551 tcg_gen_addi_i32(last, last, 1 << esz); 2552 if (is_power_of_2(vsz)) { 2553 tcg_gen_andi_i32(last, last, vsz - 1); 2554 } else { 2555 TCGv_i32 max = tcg_constant_i32(vsz); 2556 TCGv_i32 zero = tcg_constant_i32(0); 2557 tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last); 2558 } 2559 } 2560 2561 /* If LAST < 0, set LAST to the offset of the last element in the vector. 
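* For a power-of-2 vector length this falls out of the masking: the * not-found value -(1 << esz) becomes vsz - (1 << esz); otherwise that * offset is selected explicitly whenever LAST is negative.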
*/ 2562 static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz) 2563 { 2564 unsigned vsz = vec_full_reg_size(s); 2565 2566 if (is_power_of_2(vsz)) { 2567 tcg_gen_andi_i32(last, last, vsz - 1); 2568 } else { 2569 TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz)); 2570 TCGv_i32 zero = tcg_constant_i32(0); 2571 tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last); 2572 } 2573 } 2574 2575 /* Load an unsigned element of ESZ from BASE+OFS. */ 2576 static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz) 2577 { 2578 TCGv_i64 r = tcg_temp_new_i64(); 2579 2580 switch (esz) { 2581 case 0: 2582 tcg_gen_ld8u_i64(r, base, ofs); 2583 break; 2584 case 1: 2585 tcg_gen_ld16u_i64(r, base, ofs); 2586 break; 2587 case 2: 2588 tcg_gen_ld32u_i64(r, base, ofs); 2589 break; 2590 case 3: 2591 tcg_gen_ld_i64(r, base, ofs); 2592 break; 2593 default: 2594 g_assert_not_reached(); 2595 } 2596 return r; 2597 } 2598 2599 /* Load an unsigned element of ESZ from RM[LAST]. */ 2600 static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, 2601 int rm, int esz) 2602 { 2603 TCGv_ptr p = tcg_temp_new_ptr(); 2604 2605 /* Convert offset into vector into offset into ENV. 2606 * The final adjustment for the vector register base 2607 * is added via constant offset to the load. 2608 */ 2609 #if HOST_BIG_ENDIAN 2610 /* Adjust for element ordering. See vec_reg_offset. */ 2611 if (esz < 3) { 2612 tcg_gen_xori_i32(last, last, 8 - (1 << esz)); 2613 } 2614 #endif 2615 tcg_gen_ext_i32_ptr(p, last); 2616 tcg_gen_add_ptr(p, p, cpu_env); 2617 2618 return load_esz(p, vec_full_reg_offset(s, rm), esz); 2619 } 2620 2621 /* Compute CLAST for a Zreg. */ 2622 static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) 2623 { 2624 TCGv_i32 last; 2625 TCGLabel *over; 2626 TCGv_i64 ele; 2627 unsigned vsz, esz = a->esz; 2628 2629 if (!sve_access_check(s)) { 2630 return true; 2631 } 2632 2633 last = tcg_temp_new_i32(); 2634 over = gen_new_label(); 2635 2636 find_last_active(s, last, esz, a->pg); 2637 2638 /* There is of course no movcond for a 2048-bit vector, 2639 * so we must branch over the actual store. 2640 */ 2641 tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over); 2642 2643 if (!before) { 2644 incr_last_active(s, last, esz); 2645 } 2646 2647 ele = load_last_active(s, last, a->rm, esz); 2648 2649 vsz = vec_full_reg_size(s); 2650 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele); 2651 2652 /* If this insn used MOVPRFX, we may need a second move. */ 2653 if (a->rd != a->rn) { 2654 TCGLabel *done = gen_new_label(); 2655 tcg_gen_br(done); 2656 2657 gen_set_label(over); 2658 do_mov_z(s, a->rd, a->rn); 2659 2660 gen_set_label(done); 2661 } else { 2662 gen_set_label(over); 2663 } 2664 return true; 2665 } 2666 2667 TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false) 2668 TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true) 2669 2670 /* Compute CLAST for a scalar. */ 2671 static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, 2672 bool before, TCGv_i64 reg_val) 2673 { 2674 TCGv_i32 last = tcg_temp_new_i32(); 2675 TCGv_i64 ele, cmp; 2676 2677 find_last_active(s, last, esz, pg); 2678 2679 /* Extend the original value of last prior to incrementing. 
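* The sign of this pre-increment value is what distinguishes 'no active * element' from a valid offset, and is what the movcond below tests.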
*/ 2680 cmp = tcg_temp_new_i64(); 2681 tcg_gen_ext_i32_i64(cmp, last); 2682 2683 if (!before) { 2684 incr_last_active(s, last, esz); 2685 } 2686 2687 /* The conceit here is that while last < 0 indicates not found, after 2688 * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address 2689 * from which we can load garbage. We then discard the garbage with 2690 * a conditional move. 2691 */ 2692 ele = load_last_active(s, last, rm, esz); 2693 2694 tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0), 2695 ele, reg_val); 2696 } 2697 2698 /* Compute CLAST for a Vreg. */ 2699 static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) 2700 { 2701 if (sve_access_check(s)) { 2702 int esz = a->esz; 2703 int ofs = vec_reg_offset(s, a->rd, 0, esz); 2704 TCGv_i64 reg = load_esz(cpu_env, ofs, esz); 2705 2706 do_clast_scalar(s, esz, a->pg, a->rn, before, reg); 2707 write_fp_dreg(s, a->rd, reg); 2708 } 2709 return true; 2710 } 2711 2712 TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false) 2713 TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true) 2714 2715 /* Compute CLAST for a Xreg. */ 2716 static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before) 2717 { 2718 TCGv_i64 reg; 2719 2720 if (!sve_access_check(s)) { 2721 return true; 2722 } 2723 2724 reg = cpu_reg(s, a->rd); 2725 switch (a->esz) { 2726 case 0: 2727 tcg_gen_ext8u_i64(reg, reg); 2728 break; 2729 case 1: 2730 tcg_gen_ext16u_i64(reg, reg); 2731 break; 2732 case 2: 2733 tcg_gen_ext32u_i64(reg, reg); 2734 break; 2735 case 3: 2736 break; 2737 default: 2738 g_assert_not_reached(); 2739 } 2740 2741 do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg); 2742 return true; 2743 } 2744 2745 TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false) 2746 TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true) 2747 2748 /* Compute LAST for a scalar. */ 2749 static TCGv_i64 do_last_scalar(DisasContext *s, int esz, 2750 int pg, int rm, bool before) 2751 { 2752 TCGv_i32 last = tcg_temp_new_i32(); 2753 2754 find_last_active(s, last, esz, pg); 2755 if (before) { 2756 wrap_last_active(s, last, esz); 2757 } else { 2758 incr_last_active(s, last, esz); 2759 } 2760 2761 return load_last_active(s, last, rm, esz); 2762 } 2763 2764 /* Compute LAST for a Vreg. */ 2765 static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) 2766 { 2767 if (sve_access_check(s)) { 2768 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); 2769 write_fp_dreg(s, a->rd, val); 2770 } 2771 return true; 2772 } 2773 2774 TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false) 2775 TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true) 2776 2777 /* Compute LAST for a Xreg. 
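* Unlike CLAST there is no merging with the previous register value: the * element, zero-extended per load_esz, is simply written to the Xreg.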
*/ 2778 static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) 2779 { 2780 if (sve_access_check(s)) { 2781 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); 2782 tcg_gen_mov_i64(cpu_reg(s, a->rd), val); 2783 } 2784 return true; 2785 } 2786 2787 TRANS_FEAT(LASTA_r, aa64_sve, do_last_general, a, false) 2788 TRANS_FEAT(LASTB_r, aa64_sve, do_last_general, a, true) 2789 2790 static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a) 2791 { 2792 if (!dc_isar_feature(aa64_sve, s)) { 2793 return false; 2794 } 2795 if (sve_access_check(s)) { 2796 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn)); 2797 } 2798 return true; 2799 } 2800 2801 static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) 2802 { 2803 if (!dc_isar_feature(aa64_sve, s)) { 2804 return false; 2805 } 2806 if (sve_access_check(s)) { 2807 int ofs = vec_reg_offset(s, a->rn, 0, a->esz); 2808 TCGv_i64 t = load_esz(cpu_env, ofs, a->esz); 2809 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t); 2810 } 2811 return true; 2812 } 2813 2814 static gen_helper_gvec_3 * const revb_fns[4] = { 2815 NULL, gen_helper_sve_revb_h, 2816 gen_helper_sve_revb_s, gen_helper_sve_revb_d, 2817 }; 2818 TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0) 2819 2820 static gen_helper_gvec_3 * const revh_fns[4] = { 2821 NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d, 2822 }; 2823 TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0) 2824 2825 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz, 2826 a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0) 2827 2828 TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0) 2829 2830 TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz, 2831 gen_helper_sve_splice, a, a->esz) 2832 2833 TRANS_FEAT(SPLICE_sve2, aa64_sve2, gen_gvec_ool_zzzp, gen_helper_sve_splice, 2834 a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz) 2835 2836 /* 2837 *** SVE Integer Compare - Vectors Group 2838 */ 2839 2840 static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, 2841 gen_helper_gvec_flags_4 *gen_fn) 2842 { 2843 TCGv_ptr pd, zn, zm, pg; 2844 unsigned vsz; 2845 TCGv_i32 t; 2846 2847 if (gen_fn == NULL) { 2848 return false; 2849 } 2850 if (!sve_access_check(s)) { 2851 return true; 2852 } 2853 2854 vsz = vec_full_reg_size(s); 2855 t = tcg_temp_new_i32(); 2856 pd = tcg_temp_new_ptr(); 2857 zn = tcg_temp_new_ptr(); 2858 zm = tcg_temp_new_ptr(); 2859 pg = tcg_temp_new_ptr(); 2860 2861 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd)); 2862 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn)); 2863 tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm)); 2864 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg)); 2865 2866 gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0))); 2867 2868 do_pred_flags(t); 2869 return true; 2870 } 2871 2872 #define DO_PPZZ(NAME, name) \ 2873 static gen_helper_gvec_flags_4 * const name##_ppzz_fns[4] = { \ 2874 gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \ 2875 gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \ 2876 }; \ 2877 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_ppzz_flags, \ 2878 a, name##_ppzz_fns[a->esz]) 2879 2880 DO_PPZZ(CMPEQ, cmpeq) 2881 DO_PPZZ(CMPNE, cmpne) 2882 DO_PPZZ(CMPGT, cmpgt) 2883 DO_PPZZ(CMPGE, cmpge) 2884 DO_PPZZ(CMPHI, cmphi) 2885 DO_PPZZ(CMPHS, cmphs) 2886 2887 #undef DO_PPZZ 2888 2889 #define DO_PPZW(NAME, name) \ 2890 static gen_helper_gvec_flags_4 * const name##_ppzw_fns[4] = { \ 2891 
gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \ 2892 gen_helper_sve_##name##_ppzw_s, NULL \ 2893 }; \ 2894 TRANS_FEAT(NAME##_ppzw, aa64_sve, do_ppzz_flags, \ 2895 a, name##_ppzw_fns[a->esz]) 2896 2897 DO_PPZW(CMPEQ, cmpeq) 2898 DO_PPZW(CMPNE, cmpne) 2899 DO_PPZW(CMPGT, cmpgt) 2900 DO_PPZW(CMPGE, cmpge) 2901 DO_PPZW(CMPHI, cmphi) 2902 DO_PPZW(CMPHS, cmphs) 2903 DO_PPZW(CMPLT, cmplt) 2904 DO_PPZW(CMPLE, cmple) 2905 DO_PPZW(CMPLO, cmplo) 2906 DO_PPZW(CMPLS, cmpls) 2907 2908 #undef DO_PPZW 2909 2910 /* 2911 *** SVE Integer Compare - Immediate Groups 2912 */ 2913 2914 static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, 2915 gen_helper_gvec_flags_3 *gen_fn) 2916 { 2917 TCGv_ptr pd, zn, pg; 2918 unsigned vsz; 2919 TCGv_i32 t; 2920 2921 if (gen_fn == NULL) { 2922 return false; 2923 } 2924 if (!sve_access_check(s)) { 2925 return true; 2926 } 2927 2928 vsz = vec_full_reg_size(s); 2929 t = tcg_temp_new_i32(); 2930 pd = tcg_temp_new_ptr(); 2931 zn = tcg_temp_new_ptr(); 2932 pg = tcg_temp_new_ptr(); 2933 2934 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd)); 2935 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn)); 2936 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg)); 2937 2938 gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm))); 2939 2940 do_pred_flags(t); 2941 return true; 2942 } 2943 2944 #define DO_PPZI(NAME, name) \ 2945 static gen_helper_gvec_flags_3 * const name##_ppzi_fns[4] = { \ 2946 gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \ 2947 gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \ 2948 }; \ 2949 TRANS_FEAT(NAME##_ppzi, aa64_sve, do_ppzi_flags, a, \ 2950 name##_ppzi_fns[a->esz]) 2951 2952 DO_PPZI(CMPEQ, cmpeq) 2953 DO_PPZI(CMPNE, cmpne) 2954 DO_PPZI(CMPGT, cmpgt) 2955 DO_PPZI(CMPGE, cmpge) 2956 DO_PPZI(CMPHI, cmphi) 2957 DO_PPZI(CMPHS, cmphs) 2958 DO_PPZI(CMPLT, cmplt) 2959 DO_PPZI(CMPLE, cmple) 2960 DO_PPZI(CMPLO, cmplo) 2961 DO_PPZI(CMPLS, cmpls) 2962 2963 #undef DO_PPZI 2964 2965 /* 2966 *** SVE Partition Break Group 2967 */ 2968 2969 static bool do_brk3(DisasContext *s, arg_rprr_s *a, 2970 gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s) 2971 { 2972 if (!sve_access_check(s)) { 2973 return true; 2974 } 2975 2976 unsigned vsz = pred_full_reg_size(s); 2977 2978 /* Predicate sizes may be smaller and cannot use simd_desc. */ 2979 TCGv_ptr d = tcg_temp_new_ptr(); 2980 TCGv_ptr n = tcg_temp_new_ptr(); 2981 TCGv_ptr m = tcg_temp_new_ptr(); 2982 TCGv_ptr g = tcg_temp_new_ptr(); 2983 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); 2984 2985 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd)); 2986 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn)); 2987 tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm)); 2988 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg)); 2989 2990 if (a->s) { 2991 TCGv_i32 t = tcg_temp_new_i32(); 2992 fn_s(t, d, n, m, g, desc); 2993 do_pred_flags(t); 2994 } else { 2995 fn(d, n, m, g, desc); 2996 } 2997 return true; 2998 } 2999 3000 static bool do_brk2(DisasContext *s, arg_rpr_s *a, 3001 gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s) 3002 { 3003 if (!sve_access_check(s)) { 3004 return true; 3005 } 3006 3007 unsigned vsz = pred_full_reg_size(s); 3008 3009 /* Predicate sizes may be smaller and cannot use simd_desc. 
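* As in do_brk3 above, the size is passed via PREDDESC.OPRSZ instead.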
*/ 3010 TCGv_ptr d = tcg_temp_new_ptr(); 3011 TCGv_ptr n = tcg_temp_new_ptr(); 3012 TCGv_ptr g = tcg_temp_new_ptr(); 3013 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); 3014 3015 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd)); 3016 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn)); 3017 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg)); 3018 3019 if (a->s) { 3020 TCGv_i32 t = tcg_temp_new_i32(); 3021 fn_s(t, d, n, g, desc); 3022 do_pred_flags(t); 3023 } else { 3024 fn(d, n, g, desc); 3025 } 3026 return true; 3027 } 3028 3029 TRANS_FEAT(BRKPA, aa64_sve, do_brk3, a, 3030 gen_helper_sve_brkpa, gen_helper_sve_brkpas) 3031 TRANS_FEAT(BRKPB, aa64_sve, do_brk3, a, 3032 gen_helper_sve_brkpb, gen_helper_sve_brkpbs) 3033 3034 TRANS_FEAT(BRKA_m, aa64_sve, do_brk2, a, 3035 gen_helper_sve_brka_m, gen_helper_sve_brkas_m) 3036 TRANS_FEAT(BRKB_m, aa64_sve, do_brk2, a, 3037 gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m) 3038 3039 TRANS_FEAT(BRKA_z, aa64_sve, do_brk2, a, 3040 gen_helper_sve_brka_z, gen_helper_sve_brkas_z) 3041 TRANS_FEAT(BRKB_z, aa64_sve, do_brk2, a, 3042 gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z) 3043 3044 TRANS_FEAT(BRKN, aa64_sve, do_brk2, a, 3045 gen_helper_sve_brkn, gen_helper_sve_brkns) 3046 3047 /* 3048 *** SVE Predicate Count Group 3049 */ 3050 3051 static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) 3052 { 3053 unsigned psz = pred_full_reg_size(s); 3054 3055 if (psz <= 8) { 3056 uint64_t psz_mask; 3057 3058 tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn)); 3059 if (pn != pg) { 3060 TCGv_i64 g = tcg_temp_new_i64(); 3061 tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg)); 3062 tcg_gen_and_i64(val, val, g); 3063 } 3064 3065 /* Reduce the pred_esz_masks value simply to reduce the 3066 * size of the code generated here. 3067 */ 3068 psz_mask = MAKE_64BIT_MASK(0, psz * 8); 3069 tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask); 3070 3071 tcg_gen_ctpop_i64(val, val); 3072 } else { 3073 TCGv_ptr t_pn = tcg_temp_new_ptr(); 3074 TCGv_ptr t_pg = tcg_temp_new_ptr(); 3075 unsigned desc = 0; 3076 3077 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz); 3078 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); 3079 3080 tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn)); 3081 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); 3082 3083 gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc)); 3084 } 3085 } 3086 3087 static bool trans_CNTP(DisasContext *s, arg_CNTP *a) 3088 { 3089 if (!dc_isar_feature(aa64_sve, s)) { 3090 return false; 3091 } 3092 if (sve_access_check(s)) { 3093 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg); 3094 } 3095 return true; 3096 } 3097 3098 static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) 3099 { 3100 if (!dc_isar_feature(aa64_sve, s)) { 3101 return false; 3102 } 3103 if (sve_access_check(s)) { 3104 TCGv_i64 reg = cpu_reg(s, a->rd); 3105 TCGv_i64 val = tcg_temp_new_i64(); 3106 3107 do_cntp(s, val, a->esz, a->pg, a->pg); 3108 if (a->d) { 3109 tcg_gen_sub_i64(reg, reg, val); 3110 } else { 3111 tcg_gen_add_i64(reg, reg, val); 3112 } 3113 } 3114 return true; 3115 } 3116 3117 static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a) 3118 { 3119 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3120 return false; 3121 } 3122 if (sve_access_check(s)) { 3123 unsigned vsz = vec_full_reg_size(s); 3124 TCGv_i64 val = tcg_temp_new_i64(); 3125 GVecGen2sFn *gvec_fn = a->d ? 
tcg_gen_gvec_subs : tcg_gen_gvec_adds; 3126 3127 do_cntp(s, val, a->esz, a->pg, a->pg); 3128 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), 3129 vec_full_reg_offset(s, a->rn), val, vsz, vsz); 3130 } 3131 return true; 3132 } 3133 3134 static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a) 3135 { 3136 if (!dc_isar_feature(aa64_sve, s)) { 3137 return false; 3138 } 3139 if (sve_access_check(s)) { 3140 TCGv_i64 reg = cpu_reg(s, a->rd); 3141 TCGv_i64 val = tcg_temp_new_i64(); 3142 3143 do_cntp(s, val, a->esz, a->pg, a->pg); 3144 do_sat_addsub_32(reg, val, a->u, a->d); 3145 } 3146 return true; 3147 } 3148 3149 static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a) 3150 { 3151 if (!dc_isar_feature(aa64_sve, s)) { 3152 return false; 3153 } 3154 if (sve_access_check(s)) { 3155 TCGv_i64 reg = cpu_reg(s, a->rd); 3156 TCGv_i64 val = tcg_temp_new_i64(); 3157 3158 do_cntp(s, val, a->esz, a->pg, a->pg); 3159 do_sat_addsub_64(reg, val, a->u, a->d); 3160 } 3161 return true; 3162 } 3163 3164 static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a) 3165 { 3166 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3167 return false; 3168 } 3169 if (sve_access_check(s)) { 3170 TCGv_i64 val = tcg_temp_new_i64(); 3171 do_cntp(s, val, a->esz, a->pg, a->pg); 3172 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d); 3173 } 3174 return true; 3175 } 3176 3177 /* 3178 *** SVE Integer Compare Scalars Group 3179 */ 3180 3181 static bool trans_CTERM(DisasContext *s, arg_CTERM *a) 3182 { 3183 if (!dc_isar_feature(aa64_sve, s)) { 3184 return false; 3185 } 3186 if (!sve_access_check(s)) { 3187 return true; 3188 } 3189 3190 TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ); 3191 TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf); 3192 TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf); 3193 TCGv_i64 cmp = tcg_temp_new_i64(); 3194 3195 tcg_gen_setcond_i64(cond, cmp, rn, rm); 3196 tcg_gen_extrl_i64_i32(cpu_NF, cmp); 3197 3198 /* VF = !NF & !CF. */ 3199 tcg_gen_xori_i32(cpu_VF, cpu_NF, 1); 3200 tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF); 3201 3202 /* Both NF and VF actually look at bit 31. */ 3203 tcg_gen_neg_i32(cpu_NF, cpu_NF); 3204 tcg_gen_neg_i32(cpu_VF, cpu_VF); 3205 return true; 3206 } 3207 3208 static bool trans_WHILE(DisasContext *s, arg_WHILE *a) 3209 { 3210 TCGv_i64 op0, op1, t0, t1, tmax; 3211 TCGv_i32 t2; 3212 TCGv_ptr ptr; 3213 unsigned vsz = vec_full_reg_size(s); 3214 unsigned desc = 0; 3215 TCGCond cond; 3216 uint64_t maxval; 3217 /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */ 3218 bool eq = a->eq == a->lt; 3219 3220 /* The greater-than conditions are all SVE2. */ 3221 if (a->lt 3222 ? !dc_isar_feature(aa64_sve, s) 3223 : !dc_isar_feature(aa64_sve2, s)) { 3224 return false; 3225 } 3226 if (!sve_access_check(s)) { 3227 return true; 3228 } 3229 3230 op0 = read_cpu_reg(s, a->rn, 1); 3231 op1 = read_cpu_reg(s, a->rm, 1); 3232 3233 if (!a->sf) { 3234 if (a->u) { 3235 tcg_gen_ext32u_i64(op0, op0); 3236 tcg_gen_ext32u_i64(op1, op1); 3237 } else { 3238 tcg_gen_ext32s_i64(op0, op0); 3239 tcg_gen_ext32s_i64(op1, op1); 3240 } 3241 } 3242 3243 /* For the helper, compress the different conditions into a computation 3244 * of how many iterations for which the condition is true. 3245 */ 3246 t0 = tcg_temp_new_i64(); 3247 t1 = tcg_temp_new_i64(); 3248 3249 if (a->lt) { 3250 tcg_gen_sub_i64(t0, op1, op0); 3251 if (a->u) { 3252 maxval = a->sf ? UINT64_MAX : UINT32_MAX; 3253 cond = eq ? TCG_COND_LEU : TCG_COND_LTU; 3254 } else { 3255 maxval = a->sf ? 
INT64_MAX : INT32_MAX; 3256 cond = eq ? TCG_COND_LE : TCG_COND_LT; 3257 } 3258 } else { 3259 tcg_gen_sub_i64(t0, op0, op1); 3260 if (a->u) { 3261 maxval = 0; 3262 cond = eq ? TCG_COND_GEU : TCG_COND_GTU; 3263 } else { 3264 maxval = a->sf ? INT64_MIN : INT32_MIN; 3265 cond = eq ? TCG_COND_GE : TCG_COND_GT; 3266 } 3267 } 3268 3269 tmax = tcg_constant_i64(vsz >> a->esz); 3270 if (eq) { 3271 /* Equality means one more iteration. */ 3272 tcg_gen_addi_i64(t0, t0, 1); 3273 3274 /* 3275 * For the less-than while, if op1 is maxval (and the only time 3276 * the addition above could overflow), then we produce an all-true 3277 * predicate by setting the count to the vector length. This is 3278 * because the pseudocode is described as an increment + compare 3279 * loop, and the maximum integer would always compare true. 3280 * Similarly, the greater-than while has the same issue with the 3281 * minimum integer due to the decrement + compare loop. 3282 */ 3283 tcg_gen_movi_i64(t1, maxval); 3284 tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0); 3285 } 3286 3287 /* Bound to the maximum. */ 3288 tcg_gen_umin_i64(t0, t0, tmax); 3289 3290 /* Set the count to zero if the condition is false. */ 3291 tcg_gen_movi_i64(t1, 0); 3292 tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1); 3293 3294 /* Since we're bounded, pass as a 32-bit type. */ 3295 t2 = tcg_temp_new_i32(); 3296 tcg_gen_extrl_i64_i32(t2, t0); 3297 3298 /* Scale elements to bits. */ 3299 tcg_gen_shli_i32(t2, t2, a->esz); 3300 3301 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); 3302 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 3303 3304 ptr = tcg_temp_new_ptr(); 3305 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd)); 3306 3307 if (a->lt) { 3308 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); 3309 } else { 3310 gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc)); 3311 } 3312 do_pred_flags(t2); 3313 return true; 3314 } 3315 3316 static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) 3317 { 3318 TCGv_i64 op0, op1, diff, t1, tmax; 3319 TCGv_i32 t2; 3320 TCGv_ptr ptr; 3321 unsigned vsz = vec_full_reg_size(s); 3322 unsigned desc = 0; 3323 3324 if (!dc_isar_feature(aa64_sve2, s)) { 3325 return false; 3326 } 3327 if (!sve_access_check(s)) { 3328 return true; 3329 } 3330 3331 op0 = read_cpu_reg(s, a->rn, 1); 3332 op1 = read_cpu_reg(s, a->rm, 1); 3333 3334 tmax = tcg_constant_i64(vsz); 3335 diff = tcg_temp_new_i64(); 3336 3337 if (a->rw) { 3338 /* WHILERW */ 3339 /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */ 3340 t1 = tcg_temp_new_i64(); 3341 tcg_gen_sub_i64(diff, op0, op1); 3342 tcg_gen_sub_i64(t1, op1, op0); 3343 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1); 3344 /* Round down to a multiple of ESIZE. */ 3345 tcg_gen_andi_i64(diff, diff, -1 << a->esz); 3346 /* If op1 == op0, diff == 0, and the condition is always true. */ 3347 tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff); 3348 } else { 3349 /* WHILEWR */ 3350 tcg_gen_sub_i64(diff, op1, op0); 3351 /* Round down to a multiple of ESIZE. */ 3352 tcg_gen_andi_i64(diff, diff, -1 << a->esz); 3353 /* If op0 >= op1, diff <= 0, the condition is always true. */ 3354 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff); 3355 } 3356 3357 /* Bound to the maximum. */ 3358 tcg_gen_umin_i64(diff, diff, tmax); 3359 3360 /* Since we're bounded, pass as a 32-bit type. 
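* (diff has been clamped to at most the vector length in bytes, which * certainly fits in 32 bits).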
*/ 3361 t2 = tcg_temp_new_i32(); 3362 tcg_gen_extrl_i64_i32(t2, diff); 3363 3364 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); 3365 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 3366 3367 ptr = tcg_temp_new_ptr(); 3368 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd)); 3369 3370 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); 3371 do_pred_flags(t2); 3372 return true; 3373 } 3374 3375 /* 3376 *** SVE Integer Wide Immediate - Unpredicated Group 3377 */ 3378 3379 static bool trans_FDUP(DisasContext *s, arg_FDUP *a) 3380 { 3381 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3382 return false; 3383 } 3384 if (sve_access_check(s)) { 3385 unsigned vsz = vec_full_reg_size(s); 3386 int dofs = vec_full_reg_offset(s, a->rd); 3387 uint64_t imm; 3388 3389 /* Decode the VFP immediate. */ 3390 imm = vfp_expand_imm(a->esz, a->imm); 3391 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm); 3392 } 3393 return true; 3394 } 3395 3396 static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a) 3397 { 3398 if (!dc_isar_feature(aa64_sve, s)) { 3399 return false; 3400 } 3401 if (sve_access_check(s)) { 3402 unsigned vsz = vec_full_reg_size(s); 3403 int dofs = vec_full_reg_offset(s, a->rd); 3404 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm); 3405 } 3406 return true; 3407 } 3408 3409 TRANS_FEAT(ADD_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_addi, a) 3410 3411 static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a) 3412 { 3413 a->imm = -a->imm; 3414 return trans_ADD_zzi(s, a); 3415 } 3416 3417 static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a) 3418 { 3419 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 }; 3420 static const GVecGen2s op[4] = { 3421 { .fni8 = tcg_gen_vec_sub8_i64, 3422 .fniv = tcg_gen_sub_vec, 3423 .fno = gen_helper_sve_subri_b, 3424 .opt_opc = vecop_list, 3425 .vece = MO_8, 3426 .scalar_first = true }, 3427 { .fni8 = tcg_gen_vec_sub16_i64, 3428 .fniv = tcg_gen_sub_vec, 3429 .fno = gen_helper_sve_subri_h, 3430 .opt_opc = vecop_list, 3431 .vece = MO_16, 3432 .scalar_first = true }, 3433 { .fni4 = tcg_gen_sub_i32, 3434 .fniv = tcg_gen_sub_vec, 3435 .fno = gen_helper_sve_subri_s, 3436 .opt_opc = vecop_list, 3437 .vece = MO_32, 3438 .scalar_first = true }, 3439 { .fni8 = tcg_gen_sub_i64, 3440 .fniv = tcg_gen_sub_vec, 3441 .fno = gen_helper_sve_subri_d, 3442 .opt_opc = vecop_list, 3443 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 3444 .vece = MO_64, 3445 .scalar_first = true } 3446 }; 3447 3448 if (!dc_isar_feature(aa64_sve, s)) { 3449 return false; 3450 } 3451 if (sve_access_check(s)) { 3452 unsigned vsz = vec_full_reg_size(s); 3453 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd), 3454 vec_full_reg_offset(s, a->rn), 3455 vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]); 3456 } 3457 return true; 3458 } 3459 3460 TRANS_FEAT(MUL_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_muli, a) 3461 3462 static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d) 3463 { 3464 if (sve_access_check(s)) { 3465 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, 3466 tcg_constant_i64(a->imm), u, d); 3467 } 3468 return true; 3469 } 3470 3471 TRANS_FEAT(SQADD_zzi, aa64_sve, do_zzi_sat, a, false, false) 3472 TRANS_FEAT(UQADD_zzi, aa64_sve, do_zzi_sat, a, true, false) 3473 TRANS_FEAT(SQSUB_zzi, aa64_sve, do_zzi_sat, a, false, true) 3474 TRANS_FEAT(UQSUB_zzi, aa64_sve, do_zzi_sat, a, true, true) 3475 3476 static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn) 3477 { 3478 if (sve_access_check(s)) { 3479 unsigned vsz = vec_full_reg_size(s); 
3480 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), 3481 vec_full_reg_offset(s, a->rn), 3482 tcg_constant_i64(a->imm), vsz, vsz, 0, fn); 3483 } 3484 return true; 3485 } 3486 3487 #define DO_ZZI(NAME, name) \ 3488 static gen_helper_gvec_2i * const name##i_fns[4] = { \ 3489 gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \ 3490 gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \ 3491 }; \ 3492 TRANS_FEAT(NAME##_zzi, aa64_sve, do_zzi_ool, a, name##i_fns[a->esz]) 3493 3494 DO_ZZI(SMAX, smax) 3495 DO_ZZI(UMAX, umax) 3496 DO_ZZI(SMIN, smin) 3497 DO_ZZI(UMIN, umin) 3498 3499 #undef DO_ZZI 3500 3501 static gen_helper_gvec_4 * const dot_fns[2][2] = { 3502 { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, 3503 { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } 3504 }; 3505 TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz, 3506 dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0) 3507 3508 /* 3509 * SVE Multiply - Indexed 3510 */ 3511 3512 TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, 3513 gen_helper_gvec_sdot_idx_b, a) 3514 TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, 3515 gen_helper_gvec_sdot_idx_h, a) 3516 TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, 3517 gen_helper_gvec_udot_idx_b, a) 3518 TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, 3519 gen_helper_gvec_udot_idx_h, a) 3520 3521 TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, 3522 gen_helper_gvec_sudot_idx_b, a) 3523 TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, 3524 gen_helper_gvec_usdot_idx_b, a) 3525 3526 #define DO_SVE2_RRX(NAME, FUNC) \ 3527 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ 3528 a->rd, a->rn, a->rm, a->index) 3529 3530 DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h) 3531 DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s) 3532 DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d) 3533 3534 DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h) 3535 DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s) 3536 DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d) 3537 3538 DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h) 3539 DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s) 3540 DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d) 3541 3542 #undef DO_SVE2_RRX 3543 3544 #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \ 3545 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ 3546 a->rd, a->rn, a->rm, (a->index << 1) | TOP) 3547 3548 DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false) 3549 DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false) 3550 DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true) 3551 DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true) 3552 3553 DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false) 3554 DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false) 3555 DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true) 3556 DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true) 3557 3558 DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false) 3559 DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false) 3560 DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true) 3561 DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true) 3562 3563 #undef DO_SVE2_RRX_TB 3564 3565 #define DO_SVE2_RRXR(NAME, FUNC) \ 3566 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a) 3567 3568 DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h) 3569 DO_SVE2_RRXR(MLA_zzxz_s, 
gen_helper_gvec_mla_idx_s) 3570 DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d) 3571 3572 DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h) 3573 DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s) 3574 DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d) 3575 3576 DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h) 3577 DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s) 3578 DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d) 3579 3580 DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h) 3581 DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s) 3582 DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d) 3583 3584 #undef DO_SVE2_RRXR 3585 3586 #define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \ 3587 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ 3588 a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP) 3589 3590 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false) 3591 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false) 3592 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true) 3593 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true) 3594 3595 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false) 3596 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false) 3597 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true) 3598 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true) 3599 3600 DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false) 3601 DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false) 3602 DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true) 3603 DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true) 3604 3605 DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false) 3606 DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false) 3607 DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true) 3608 DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true) 3609 3610 DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false) 3611 DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false) 3612 DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true) 3613 DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true) 3614 3615 DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false) 3616 DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false) 3617 DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true) 3618 DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true) 3619 3620 #undef DO_SVE2_RRXR_TB 3621 3622 #define DO_SVE2_RRXR_ROT(NAME, FUNC) \ 3623 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ 3624 a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot) 3625 3626 DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h) 3627 DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s) 3628 3629 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h) 3630 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s) 3631 3632 DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s) 3633 DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) 3634 3635 #undef DO_SVE2_RRXR_ROT 3636 3637 /* 3638 *** SVE Floating Point Multiply-Add Indexed Group 3639 */ 3640 3641 static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub) 3642 { 3643 static gen_helper_gvec_4_ptr * const fns[4] = { 3644 NULL, 3645 
gen_helper_gvec_fmla_idx_h, 3646 gen_helper_gvec_fmla_idx_s, 3647 gen_helper_gvec_fmla_idx_d, 3648 }; 3649 return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, 3650 (a->index << 1) | sub, 3651 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); 3652 } 3653 3654 TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false) 3655 TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true) 3656 3657 /* 3658 *** SVE Floating Point Multiply Indexed Group 3659 */ 3660 3661 static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = { 3662 NULL, gen_helper_gvec_fmul_idx_h, 3663 gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d, 3664 }; 3665 TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz, 3666 fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index, 3667 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) 3668 3669 /* 3670 *** SVE Floating Point Fast Reduction Group 3671 */ 3672 3673 typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr, 3674 TCGv_ptr, TCGv_i32); 3675 3676 static bool do_reduce(DisasContext *s, arg_rpr_esz *a, 3677 gen_helper_fp_reduce *fn) 3678 { 3679 unsigned vsz, p2vsz; 3680 TCGv_i32 t_desc; 3681 TCGv_ptr t_zn, t_pg, status; 3682 TCGv_i64 temp; 3683 3684 if (fn == NULL) { 3685 return false; 3686 } 3687 if (!sve_access_check(s)) { 3688 return true; 3689 } 3690 3691 vsz = vec_full_reg_size(s); 3692 p2vsz = pow2ceil(vsz); 3693 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz)); 3694 temp = tcg_temp_new_i64(); 3695 t_zn = tcg_temp_new_ptr(); 3696 t_pg = tcg_temp_new_ptr(); 3697 3698 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn)); 3699 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg)); 3700 status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); 3701 3702 fn(temp, t_zn, t_pg, status, t_desc); 3703 3704 write_fp_dreg(s, a->rd, temp); 3705 return true; 3706 } 3707 3708 #define DO_VPZ(NAME, name) \ 3709 static gen_helper_fp_reduce * const name##_fns[4] = { \ 3710 NULL, gen_helper_sve_##name##_h, \ 3711 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 3712 }; \ 3713 TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz]) 3714 3715 DO_VPZ(FADDV, faddv) 3716 DO_VPZ(FMINNMV, fminnmv) 3717 DO_VPZ(FMAXNMV, fmaxnmv) 3718 DO_VPZ(FMINV, fminv) 3719 DO_VPZ(FMAXV, fmaxv) 3720 3721 #undef DO_VPZ 3722 3723 /* 3724 *** SVE Floating Point Unary Operations - Unpredicated Group 3725 */ 3726 3727 static gen_helper_gvec_2_ptr * const frecpe_fns[] = { 3728 NULL, gen_helper_gvec_frecpe_h, 3729 gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, 3730 }; 3731 TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0) 3732 3733 static gen_helper_gvec_2_ptr * const frsqrte_fns[] = { 3734 NULL, gen_helper_gvec_frsqrte_h, 3735 gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, 3736 }; 3737 TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0) 3738 3739 /* 3740 *** SVE Floating Point Compare with Zero Group 3741 */ 3742 3743 static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a, 3744 gen_helper_gvec_3_ptr *fn) 3745 { 3746 if (fn == NULL) { 3747 return false; 3748 } 3749 if (sve_access_check(s)) { 3750 unsigned vsz = vec_full_reg_size(s); 3751 TCGv_ptr status = 3752 fpstatus_ptr(a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); 3753 3754 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd), 3755 vec_full_reg_offset(s, a->rn), 3756 pred_full_reg_offset(s, a->pg), 3757 status, vsz, vsz, 0, fn); 3758 } 3759 return true; 3760 } 3761 3762 #define DO_PPZ(NAME, name) \ 3763 static gen_helper_gvec_3_ptr * const name##_fns[] = { \ 3764 NULL, gen_helper_sve_##name##_h, \ 3765 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 3766 }; \ 3767 TRANS_FEAT(NAME, aa64_sve, do_ppz_fp, a, name##_fns[a->esz]) 3768 3769 DO_PPZ(FCMGE_ppz0, fcmge0) 3770 DO_PPZ(FCMGT_ppz0, fcmgt0) 3771 DO_PPZ(FCMLE_ppz0, fcmle0) 3772 DO_PPZ(FCMLT_ppz0, fcmlt0) 3773 DO_PPZ(FCMEQ_ppz0, fcmeq0) 3774 DO_PPZ(FCMNE_ppz0, fcmne0) 3775 3776 #undef DO_PPZ 3777 3778 /* 3779 *** SVE floating-point trig multiply-add coefficient 3780 */ 3781 3782 static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { 3783 NULL, gen_helper_sve_ftmad_h, 3784 gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, 3785 }; 3786 TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, 3787 ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, 3788 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) 3789 3790 /* 3791 *** SVE Floating Point Accumulating Reduction Group 3792 */ 3793 3794 static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) 3795 { 3796 typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr, 3797 TCGv_ptr, TCGv_ptr, TCGv_i32); 3798 static fadda_fn * const fns[3] = { 3799 gen_helper_sve_fadda_h, 3800 gen_helper_sve_fadda_s, 3801 gen_helper_sve_fadda_d, 3802 }; 3803 unsigned vsz = vec_full_reg_size(s); 3804 TCGv_ptr t_rm, t_pg, t_fpst; 3805 TCGv_i64 t_val; 3806 TCGv_i32 t_desc; 3807 3808 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3809 return false; 3810 } 3811 s->is_nonstreaming = true; 3812 if (!sve_access_check(s)) { 3813 return true; 3814 } 3815 3816 t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz); 3817 t_rm = tcg_temp_new_ptr(); 3818 t_pg = tcg_temp_new_ptr(); 3819 tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm)); 3820 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg)); 3821 t_fpst = fpstatus_ptr(a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR); 3822 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 3823 3824 fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc); 3825 3826 write_fp_dreg(s, a->rd, t_val); 3827 return true; 3828 } 3829 3830 /* 3831 *** SVE Floating Point Arithmetic - Unpredicated Group 3832 */ 3833 3834 #define DO_FP3(NAME, name) \ 3835 static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ 3836 NULL, gen_helper_gvec_##name##_h, \ 3837 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ 3838 }; \ 3839 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0) 3840 3841 DO_FP3(FADD_zzz, fadd) 3842 DO_FP3(FSUB_zzz, fsub) 3843 DO_FP3(FMUL_zzz, fmul) 3844 DO_FP3(FRECPS, recps) 3845 DO_FP3(FRSQRTS, rsqrts) 3846 3847 #undef DO_FP3 3848 3849 static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = { 3850 NULL, gen_helper_gvec_ftsmul_h, 3851 gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d 3852 }; 3853 TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, 3854 ftsmul_fns[a->esz], a, 0) 3855 3856 /* 3857 *** SVE Floating Point Arithmetic - Predicated Group 3858 */ 3859 3860 #define DO_ZPZZ_FP(NAME, FEAT, name) \ 3861 static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ 3862 NULL, gen_helper_##name##_h, \ 3863 gen_helper_##name##_s, gen_helper_##name##_d \ 3864 }; \ 3865 TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) 3866 3867 DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd) 3868 DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub) 3869 DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul) 3870 DO_ZPZZ_FP(FMIN_zpzz, aa64_sve, sve_fmin) 3871 DO_ZPZZ_FP(FMAX_zpzz, aa64_sve, sve_fmax) 3872 DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) 3873 DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) 3874 DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd) 3875 DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn) 3876 DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv) 3877 DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx) 3878 3879 typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr, 3880 TCGv_i64, TCGv_ptr, TCGv_i32); 3881 3882 static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, 3883 TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn) 3884 { 3885 unsigned vsz = vec_full_reg_size(s); 3886 TCGv_ptr t_zd, t_zn, t_pg, status; 3887 TCGv_i32 desc; 3888 3889 t_zd = tcg_temp_new_ptr(); 3890 t_zn = tcg_temp_new_ptr(); 3891 t_pg = tcg_temp_new_ptr(); 3892 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd)); 3893 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn)); 3894 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); 3895 3896 status = fpstatus_ptr(is_fp16 ? 
FPST_FPCR_F16 : FPST_FPCR); 3897 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 3898 fn(t_zd, t_zn, t_pg, scalar, status, desc); 3899 } 3900 3901 static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, 3902 gen_helper_sve_fp2scalar *fn) 3903 { 3904 if (fn == NULL) { 3905 return false; 3906 } 3907 if (sve_access_check(s)) { 3908 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, 3909 tcg_constant_i64(imm), fn); 3910 } 3911 return true; 3912 } 3913 3914 #define DO_FP_IMM(NAME, name, const0, const1) \ 3915 static gen_helper_sve_fp2scalar * const name##_fns[4] = { \ 3916 NULL, gen_helper_sve_##name##_h, \ 3917 gen_helper_sve_##name##_s, \ 3918 gen_helper_sve_##name##_d \ 3919 }; \ 3920 static uint64_t const name##_const[4][2] = { \ 3921 { -1, -1 }, \ 3922 { float16_##const0, float16_##const1 }, \ 3923 { float32_##const0, float32_##const1 }, \ 3924 { float64_##const0, float64_##const1 }, \ 3925 }; \ 3926 TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ 3927 name##_const[a->esz][a->imm], name##_fns[a->esz]) 3928 3929 DO_FP_IMM(FADD, fadds, half, one) 3930 DO_FP_IMM(FSUB, fsubs, half, one) 3931 DO_FP_IMM(FMUL, fmuls, half, two) 3932 DO_FP_IMM(FSUBR, fsubrs, half, one) 3933 DO_FP_IMM(FMAXNM, fmaxnms, zero, one) 3934 DO_FP_IMM(FMINNM, fminnms, zero, one) 3935 DO_FP_IMM(FMAX, fmaxs, zero, one) 3936 DO_FP_IMM(FMIN, fmins, zero, one) 3937 3938 #undef DO_FP_IMM 3939 3940 static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, 3941 gen_helper_gvec_4_ptr *fn) 3942 { 3943 if (fn == NULL) { 3944 return false; 3945 } 3946 if (sve_access_check(s)) { 3947 unsigned vsz = vec_full_reg_size(s); 3948 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); 3949 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd), 3950 vec_full_reg_offset(s, a->rn), 3951 vec_full_reg_offset(s, a->rm), 3952 pred_full_reg_offset(s, a->pg), 3953 status, vsz, vsz, 0, fn); 3954 } 3955 return true; 3956 } 3957 3958 #define DO_FPCMP(NAME, name) \ 3959 static gen_helper_gvec_4_ptr * const name##_fns[4] = { \ 3960 NULL, gen_helper_sve_##name##_h, \ 3961 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ 3962 }; \ 3963 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_fp_cmp, a, name##_fns[a->esz]) 3964 3965 DO_FPCMP(FCMGE, fcmge) 3966 DO_FPCMP(FCMGT, fcmgt) 3967 DO_FPCMP(FCMEQ, fcmeq) 3968 DO_FPCMP(FCMNE, fcmne) 3969 DO_FPCMP(FCMUO, fcmuo) 3970 DO_FPCMP(FACGE, facge) 3971 DO_FPCMP(FACGT, facgt) 3972 3973 #undef DO_FPCMP 3974 3975 static gen_helper_gvec_4_ptr * const fcadd_fns[] = { 3976 NULL, gen_helper_sve_fcadd_h, 3977 gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d, 3978 }; 3979 TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], 3980 a->rd, a->rn, a->rm, a->pg, a->rot, 3981 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) 3982 3983 #define DO_FMLA(NAME, name) \ 3984 static gen_helper_gvec_5_ptr * const name##_fns[4] = { \ 3985 NULL, gen_helper_sve_##name##_h, \ 3986 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ 3987 }; \ 3988 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, name##_fns[a->esz], \ 3989 a->rd, a->rn, a->rm, a->ra, a->pg, 0, \ 3990 a->esz == MO_16 ? 
FPST_FPCR_F16 : FPST_FPCR) 3991 3992 DO_FMLA(FMLA_zpzzz, fmla_zpzzz) 3993 DO_FMLA(FMLS_zpzzz, fmls_zpzzz) 3994 DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz) 3995 DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz) 3996 3997 #undef DO_FMLA 3998 3999 static gen_helper_gvec_5_ptr * const fcmla_fns[4] = { 4000 NULL, gen_helper_sve_fcmla_zpzzz_h, 4001 gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d, 4002 }; 4003 TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz], 4004 a->rd, a->rn, a->rm, a->ra, a->pg, a->rot, 4005 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) 4006 4007 static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = { 4008 NULL, gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, NULL 4009 }; 4010 TRANS_FEAT(FCMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, fcmla_idx_fns[a->esz], 4011 a->rd, a->rn, a->rm, a->ra, a->index * 4 + a->rot, 4012 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) 4013 4014 /* 4015 *** SVE Floating Point Unary Operations Predicated Group 4016 */ 4017 4018 TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 4019 gen_helper_sve_fcvt_sh, a, 0, FPST_FPCR) 4020 TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 4021 gen_helper_sve_fcvt_hs, a, 0, FPST_FPCR) 4022 4023 TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, 4024 gen_helper_sve_bfcvt, a, 0, FPST_FPCR) 4025 4026 TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 4027 gen_helper_sve_fcvt_dh, a, 0, FPST_FPCR) 4028 TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 4029 gen_helper_sve_fcvt_hd, a, 0, FPST_FPCR) 4030 TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4031 gen_helper_sve_fcvt_ds, a, 0, FPST_FPCR) 4032 TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4033 gen_helper_sve_fcvt_sd, a, 0, FPST_FPCR) 4034 4035 TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4036 gen_helper_sve_fcvtzs_hh, a, 0, FPST_FPCR_F16) 4037 TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4038 gen_helper_sve_fcvtzu_hh, a, 0, FPST_FPCR_F16) 4039 TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 4040 gen_helper_sve_fcvtzs_hs, a, 0, FPST_FPCR_F16) 4041 TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 4042 gen_helper_sve_fcvtzu_hs, a, 0, FPST_FPCR_F16) 4043 TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 4044 gen_helper_sve_fcvtzs_hd, a, 0, FPST_FPCR_F16) 4045 TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 4046 gen_helper_sve_fcvtzu_hd, a, 0, FPST_FPCR_F16) 4047 4048 TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4049 gen_helper_sve_fcvtzs_ss, a, 0, FPST_FPCR) 4050 TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4051 gen_helper_sve_fcvtzu_ss, a, 0, FPST_FPCR) 4052 TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4053 gen_helper_sve_fcvtzs_sd, a, 0, FPST_FPCR) 4054 TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4055 gen_helper_sve_fcvtzu_sd, a, 0, FPST_FPCR) 4056 TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4057 gen_helper_sve_fcvtzs_ds, a, 0, FPST_FPCR) 4058 TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4059 gen_helper_sve_fcvtzu_ds, a, 0, FPST_FPCR) 4060 4061 TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4062 gen_helper_sve_fcvtzs_dd, a, 0, FPST_FPCR) 4063 TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4064 gen_helper_sve_fcvtzu_dd, a, 0, FPST_FPCR) 4065 4066 static gen_helper_gvec_3_ptr * const frint_fns[] = { 4067 NULL, 4068 gen_helper_sve_frint_h, 4069 gen_helper_sve_frint_s, 4070 gen_helper_sve_frint_d 4071 }; 4072 TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz], 4073 a, 0, a->esz == MO_16 ? 
           FPST_FPCR_F16 : FPST_FPCR)

static gen_helper_gvec_3_ptr * const frintx_fns[] = {
    NULL,
    gen_helper_sve_frintx_h,
    gen_helper_sve_frintx_s,
    gen_helper_sve_frintx_d
};
TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz],
           a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)

static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
                          ARMFPRounding mode, gen_helper_gvec_3_ptr *fn)
{
    unsigned vsz;
    TCGv_i32 tmode;
    TCGv_ptr status;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    tmode = gen_set_rmode(mode, status);

    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                       vec_full_reg_offset(s, a->rn),
                       pred_full_reg_offset(s, a->pg),
                       status, vsz, vsz, 0, fn);

    gen_restore_rmode(tmode, status);
    return true;
}

TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a,
           FPROUNDING_TIEEVEN, frint_fns[a->esz])
TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a,
           FPROUNDING_POSINF, frint_fns[a->esz])
TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a,
           FPROUNDING_NEGINF, frint_fns[a->esz])
TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a,
           FPROUNDING_ZERO, frint_fns[a->esz])
TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a,
           FPROUNDING_TIEAWAY, frint_fns[a->esz])

static gen_helper_gvec_3_ptr * const frecpx_fns[] = {
    NULL, gen_helper_sve_frecpx_h,
    gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d,
};
TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz],
           a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)

static gen_helper_gvec_3_ptr * const fsqrt_fns[] = {
    NULL, gen_helper_sve_fsqrt_h,
    gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d,
};
TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz],
           a, 0, a->esz == MO_16 ?
FPST_FPCR_F16 : FPST_FPCR) 4135 4136 TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4137 gen_helper_sve_scvt_hh, a, 0, FPST_FPCR_F16) 4138 TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 4139 gen_helper_sve_scvt_sh, a, 0, FPST_FPCR_F16) 4140 TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 4141 gen_helper_sve_scvt_dh, a, 0, FPST_FPCR_F16) 4142 4143 TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4144 gen_helper_sve_scvt_ss, a, 0, FPST_FPCR) 4145 TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4146 gen_helper_sve_scvt_ds, a, 0, FPST_FPCR) 4147 4148 TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4149 gen_helper_sve_scvt_sd, a, 0, FPST_FPCR) 4150 TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4151 gen_helper_sve_scvt_dd, a, 0, FPST_FPCR) 4152 4153 TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4154 gen_helper_sve_ucvt_hh, a, 0, FPST_FPCR_F16) 4155 TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 4156 gen_helper_sve_ucvt_sh, a, 0, FPST_FPCR_F16) 4157 TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 4158 gen_helper_sve_ucvt_dh, a, 0, FPST_FPCR_F16) 4159 4160 TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4161 gen_helper_sve_ucvt_ss, a, 0, FPST_FPCR) 4162 TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4163 gen_helper_sve_ucvt_ds, a, 0, FPST_FPCR) 4164 TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4165 gen_helper_sve_ucvt_sd, a, 0, FPST_FPCR) 4166 4167 TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4168 gen_helper_sve_ucvt_dd, a, 0, FPST_FPCR) 4169 4170 /* 4171 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group 4172 */ 4173 4174 /* Subroutine loading a vector register at VOFS of LEN bytes. 4175 * The load should begin at the address Rn + IMM. 4176 */ 4177 4178 void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, 4179 int len, int rn, int imm) 4180 { 4181 int len_align = QEMU_ALIGN_DOWN(len, 8); 4182 int len_remain = len % 8; 4183 int nparts = len / 8 + ctpop8(len_remain); 4184 int midx = get_mem_index(s); 4185 TCGv_i64 dirty_addr, clean_addr, t0, t1; 4186 4187 dirty_addr = tcg_temp_new_i64(); 4188 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); 4189 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); 4190 4191 /* 4192 * Note that unpredicated load/store of vector/predicate registers 4193 * are defined as a stream of bytes, which equates to little-endian 4194 * operations on larger quantities. 4195 * Attempt to keep code expansion to a minimum by limiting the 4196 * amount of unrolling done. 4197 */ 4198 if (nparts <= 4) { 4199 int i; 4200 4201 t0 = tcg_temp_new_i64(); 4202 for (i = 0; i < len_align; i += 8) { 4203 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); 4204 tcg_gen_st_i64(t0, base, vofs + i); 4205 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4206 } 4207 } else { 4208 TCGLabel *loop = gen_new_label(); 4209 TCGv_ptr tp, i = tcg_temp_new_ptr(); 4210 4211 tcg_gen_movi_ptr(i, 0); 4212 gen_set_label(loop); 4213 4214 t0 = tcg_temp_new_i64(); 4215 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); 4216 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4217 4218 tp = tcg_temp_new_ptr(); 4219 tcg_gen_add_ptr(tp, base, i); 4220 tcg_gen_addi_ptr(i, i, 8); 4221 tcg_gen_st_i64(t0, tp, vofs); 4222 4223 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); 4224 } 4225 4226 /* 4227 * Predicate register loads can be any multiple of 2. 4228 * Note that we still store the entire 64-bit unit into cpu_env. 
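     * The len_remain cases below cover exactly the tails that can
     * occur: a power-of-two remainder is loaded in a single operation,
     * while a 6-byte tail is assembled from a 4-byte and a 2-byte
     * load joined with a deposit.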
4229 */ 4230 if (len_remain) { 4231 t0 = tcg_temp_new_i64(); 4232 switch (len_remain) { 4233 case 2: 4234 case 4: 4235 case 8: 4236 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, 4237 MO_LE | ctz32(len_remain)); 4238 break; 4239 4240 case 6: 4241 t1 = tcg_temp_new_i64(); 4242 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL); 4243 tcg_gen_addi_i64(clean_addr, clean_addr, 4); 4244 tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW); 4245 tcg_gen_deposit_i64(t0, t0, t1, 32, 32); 4246 break; 4247 4248 default: 4249 g_assert_not_reached(); 4250 } 4251 tcg_gen_st_i64(t0, base, vofs + len_align); 4252 } 4253 } 4254 4255 /* Similarly for stores. */ 4256 void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, 4257 int len, int rn, int imm) 4258 { 4259 int len_align = QEMU_ALIGN_DOWN(len, 8); 4260 int len_remain = len % 8; 4261 int nparts = len / 8 + ctpop8(len_remain); 4262 int midx = get_mem_index(s); 4263 TCGv_i64 dirty_addr, clean_addr, t0; 4264 4265 dirty_addr = tcg_temp_new_i64(); 4266 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); 4267 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len); 4268 4269 /* Note that unpredicated load/store of vector/predicate registers 4270 * are defined as a stream of bytes, which equates to little-endian 4271 * operations on larger quantities. There is no nice way to force 4272 * a little-endian store for aarch64_be-linux-user out of line. 4273 * 4274 * Attempt to keep code expansion to a minimum by limiting the 4275 * amount of unrolling done. 4276 */ 4277 if (nparts <= 4) { 4278 int i; 4279 4280 t0 = tcg_temp_new_i64(); 4281 for (i = 0; i < len_align; i += 8) { 4282 tcg_gen_ld_i64(t0, base, vofs + i); 4283 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); 4284 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4285 } 4286 } else { 4287 TCGLabel *loop = gen_new_label(); 4288 TCGv_ptr tp, i = tcg_temp_new_ptr(); 4289 4290 tcg_gen_movi_ptr(i, 0); 4291 gen_set_label(loop); 4292 4293 t0 = tcg_temp_new_i64(); 4294 tp = tcg_temp_new_ptr(); 4295 tcg_gen_add_ptr(tp, base, i); 4296 tcg_gen_ld_i64(t0, tp, vofs); 4297 tcg_gen_addi_ptr(i, i, 8); 4298 4299 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); 4300 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4301 4302 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); 4303 } 4304 4305 /* Predicate register stores can be any multiple of 2. 
*/ 4306 if (len_remain) { 4307 t0 = tcg_temp_new_i64(); 4308 tcg_gen_ld_i64(t0, base, vofs + len_align); 4309 4310 switch (len_remain) { 4311 case 2: 4312 case 4: 4313 case 8: 4314 tcg_gen_qemu_st_i64(t0, clean_addr, midx, 4315 MO_LE | ctz32(len_remain)); 4316 break; 4317 4318 case 6: 4319 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL); 4320 tcg_gen_addi_i64(clean_addr, clean_addr, 4); 4321 tcg_gen_shri_i64(t0, t0, 32); 4322 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW); 4323 break; 4324 4325 default: 4326 g_assert_not_reached(); 4327 } 4328 } 4329 } 4330 4331 static bool trans_LDR_zri(DisasContext *s, arg_rri *a) 4332 { 4333 if (!dc_isar_feature(aa64_sve, s)) { 4334 return false; 4335 } 4336 if (sve_access_check(s)) { 4337 int size = vec_full_reg_size(s); 4338 int off = vec_full_reg_offset(s, a->rd); 4339 gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); 4340 } 4341 return true; 4342 } 4343 4344 static bool trans_LDR_pri(DisasContext *s, arg_rri *a) 4345 { 4346 if (!dc_isar_feature(aa64_sve, s)) { 4347 return false; 4348 } 4349 if (sve_access_check(s)) { 4350 int size = pred_full_reg_size(s); 4351 int off = pred_full_reg_offset(s, a->rd); 4352 gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); 4353 } 4354 return true; 4355 } 4356 4357 static bool trans_STR_zri(DisasContext *s, arg_rri *a) 4358 { 4359 if (!dc_isar_feature(aa64_sve, s)) { 4360 return false; 4361 } 4362 if (sve_access_check(s)) { 4363 int size = vec_full_reg_size(s); 4364 int off = vec_full_reg_offset(s, a->rd); 4365 gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); 4366 } 4367 return true; 4368 } 4369 4370 static bool trans_STR_pri(DisasContext *s, arg_rri *a) 4371 { 4372 if (!dc_isar_feature(aa64_sve, s)) { 4373 return false; 4374 } 4375 if (sve_access_check(s)) { 4376 int size = pred_full_reg_size(s); 4377 int off = pred_full_reg_offset(s, a->rd); 4378 gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); 4379 } 4380 return true; 4381 } 4382 4383 /* 4384 *** SVE Memory - Contiguous Load Group 4385 */ 4386 4387 /* The memory mode of the dtype. */ 4388 static const MemOp dtype_mop[16] = { 4389 MO_UB, MO_UB, MO_UB, MO_UB, 4390 MO_SL, MO_UW, MO_UW, MO_UW, 4391 MO_SW, MO_SW, MO_UL, MO_UL, 4392 MO_SB, MO_SB, MO_SB, MO_UQ 4393 }; 4394 4395 #define dtype_msz(x) (dtype_mop[x] & MO_SIZE) 4396 4397 /* The vector element size of dtype. */ 4398 static const uint8_t dtype_esz[16] = { 4399 0, 1, 2, 3, 4400 3, 1, 2, 3, 4401 3, 2, 2, 3, 4402 3, 2, 1, 3 4403 }; 4404 4405 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, 4406 int dtype, uint32_t mte_n, bool is_write, 4407 gen_helper_gvec_mem *fn) 4408 { 4409 unsigned vsz = vec_full_reg_size(s); 4410 TCGv_ptr t_pg; 4411 int desc = 0; 4412 4413 /* 4414 * For e.g. LD4, there are not enough arguments to pass all 4 4415 * registers as pointers, so encode the regno into the data field. 4416 * For consistency, do this even for LD1. 
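     * When MTE is active, the MTE descriptor fields are packed into
     * the same word above SVE_MTEDESC_SHIFT, leaving the low bits of
     * the simd_desc data field free for the register number.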
4417 */ 4418 if (s->mte_active[0]) { 4419 int msz = dtype_msz(dtype); 4420 4421 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); 4422 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); 4423 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); 4424 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); 4425 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1); 4426 desc <<= SVE_MTEDESC_SHIFT; 4427 } else { 4428 addr = clean_data_tbi(s, addr); 4429 } 4430 4431 desc = simd_desc(vsz, vsz, zt | desc); 4432 t_pg = tcg_temp_new_ptr(); 4433 4434 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); 4435 fn(cpu_env, t_pg, addr, tcg_constant_i32(desc)); 4436 } 4437 4438 /* Indexed by [mte][be][dtype][nreg] */ 4439 static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { 4440 { /* mte inactive, little-endian */ 4441 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, 4442 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, 4443 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, 4444 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, 4445 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, 4446 4447 { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL }, 4448 { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r, 4449 gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r }, 4450 { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL }, 4451 { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL }, 4452 4453 { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL }, 4454 { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL }, 4455 { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r, 4456 gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r }, 4457 { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL }, 4458 4459 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, 4460 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, 4461 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, 4462 { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r, 4463 gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } }, 4464 4465 /* mte inactive, big-endian */ 4466 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, 4467 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, 4468 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, 4469 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, 4470 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, 4471 4472 { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL }, 4473 { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r, 4474 gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r }, 4475 { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL }, 4476 { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL }, 4477 4478 { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL }, 4479 { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL }, 4480 { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r, 4481 gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r }, 4482 { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL }, 4483 4484 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, 4485 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, 4486 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, 4487 { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r, 4488 gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } }, 4489 4490 { /* mte active, little-endian */ 4491 { { gen_helper_sve_ld1bb_r_mte, 4492 gen_helper_sve_ld2bb_r_mte, 4493 gen_helper_sve_ld3bb_r_mte, 4494 gen_helper_sve_ld4bb_r_mte }, 4495 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL }, 4496 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL }, 4497 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL }, 4498 4499 { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, 
NULL }, 4500 { gen_helper_sve_ld1hh_le_r_mte, 4501 gen_helper_sve_ld2hh_le_r_mte, 4502 gen_helper_sve_ld3hh_le_r_mte, 4503 gen_helper_sve_ld4hh_le_r_mte }, 4504 { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL }, 4505 { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL }, 4506 4507 { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL }, 4508 { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL }, 4509 { gen_helper_sve_ld1ss_le_r_mte, 4510 gen_helper_sve_ld2ss_le_r_mte, 4511 gen_helper_sve_ld3ss_le_r_mte, 4512 gen_helper_sve_ld4ss_le_r_mte }, 4513 { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL }, 4514 4515 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL }, 4516 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL }, 4517 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL }, 4518 { gen_helper_sve_ld1dd_le_r_mte, 4519 gen_helper_sve_ld2dd_le_r_mte, 4520 gen_helper_sve_ld3dd_le_r_mte, 4521 gen_helper_sve_ld4dd_le_r_mte } }, 4522 4523 /* mte active, big-endian */ 4524 { { gen_helper_sve_ld1bb_r_mte, 4525 gen_helper_sve_ld2bb_r_mte, 4526 gen_helper_sve_ld3bb_r_mte, 4527 gen_helper_sve_ld4bb_r_mte }, 4528 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL }, 4529 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL }, 4530 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL }, 4531 4532 { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL }, 4533 { gen_helper_sve_ld1hh_be_r_mte, 4534 gen_helper_sve_ld2hh_be_r_mte, 4535 gen_helper_sve_ld3hh_be_r_mte, 4536 gen_helper_sve_ld4hh_be_r_mte }, 4537 { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL }, 4538 { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL }, 4539 4540 { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL }, 4541 { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL }, 4542 { gen_helper_sve_ld1ss_be_r_mte, 4543 gen_helper_sve_ld2ss_be_r_mte, 4544 gen_helper_sve_ld3ss_be_r_mte, 4545 gen_helper_sve_ld4ss_be_r_mte }, 4546 { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL }, 4547 4548 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL }, 4549 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL }, 4550 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL }, 4551 { gen_helper_sve_ld1dd_be_r_mte, 4552 gen_helper_sve_ld2dd_be_r_mte, 4553 gen_helper_sve_ld3dd_be_r_mte, 4554 gen_helper_sve_ld4dd_be_r_mte } } }, 4555 }; 4556 4557 static void do_ld_zpa(DisasContext *s, int zt, int pg, 4558 TCGv_i64 addr, int dtype, int nreg) 4559 { 4560 gen_helper_gvec_mem *fn 4561 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg]; 4562 4563 /* 4564 * While there are holes in the table, they are not 4565 * accessible via the instruction encoding. 
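     * For example, the multi-register forms (LD2/LD3/LD4) exist only
     * for dtypes whose memory and element sizes match, so the
     * extending-load rows have NULL in their nreg > 0 columns.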
4566 */ 4567 assert(fn != NULL); 4568 do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn); 4569 } 4570 4571 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) 4572 { 4573 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 4574 return false; 4575 } 4576 if (sve_access_check(s)) { 4577 TCGv_i64 addr = tcg_temp_new_i64(); 4578 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); 4579 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4580 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); 4581 } 4582 return true; 4583 } 4584 4585 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) 4586 { 4587 if (!dc_isar_feature(aa64_sve, s)) { 4588 return false; 4589 } 4590 if (sve_access_check(s)) { 4591 int vsz = vec_full_reg_size(s); 4592 int elements = vsz >> dtype_esz[a->dtype]; 4593 TCGv_i64 addr = tcg_temp_new_i64(); 4594 4595 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), 4596 (a->imm * elements * (a->nreg + 1)) 4597 << dtype_msz(a->dtype)); 4598 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); 4599 } 4600 return true; 4601 } 4602 4603 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) 4604 { 4605 static gen_helper_gvec_mem * const fns[2][2][16] = { 4606 { /* mte inactive, little-endian */ 4607 { gen_helper_sve_ldff1bb_r, 4608 gen_helper_sve_ldff1bhu_r, 4609 gen_helper_sve_ldff1bsu_r, 4610 gen_helper_sve_ldff1bdu_r, 4611 4612 gen_helper_sve_ldff1sds_le_r, 4613 gen_helper_sve_ldff1hh_le_r, 4614 gen_helper_sve_ldff1hsu_le_r, 4615 gen_helper_sve_ldff1hdu_le_r, 4616 4617 gen_helper_sve_ldff1hds_le_r, 4618 gen_helper_sve_ldff1hss_le_r, 4619 gen_helper_sve_ldff1ss_le_r, 4620 gen_helper_sve_ldff1sdu_le_r, 4621 4622 gen_helper_sve_ldff1bds_r, 4623 gen_helper_sve_ldff1bss_r, 4624 gen_helper_sve_ldff1bhs_r, 4625 gen_helper_sve_ldff1dd_le_r }, 4626 4627 /* mte inactive, big-endian */ 4628 { gen_helper_sve_ldff1bb_r, 4629 gen_helper_sve_ldff1bhu_r, 4630 gen_helper_sve_ldff1bsu_r, 4631 gen_helper_sve_ldff1bdu_r, 4632 4633 gen_helper_sve_ldff1sds_be_r, 4634 gen_helper_sve_ldff1hh_be_r, 4635 gen_helper_sve_ldff1hsu_be_r, 4636 gen_helper_sve_ldff1hdu_be_r, 4637 4638 gen_helper_sve_ldff1hds_be_r, 4639 gen_helper_sve_ldff1hss_be_r, 4640 gen_helper_sve_ldff1ss_be_r, 4641 gen_helper_sve_ldff1sdu_be_r, 4642 4643 gen_helper_sve_ldff1bds_r, 4644 gen_helper_sve_ldff1bss_r, 4645 gen_helper_sve_ldff1bhs_r, 4646 gen_helper_sve_ldff1dd_be_r } }, 4647 4648 { /* mte active, little-endian */ 4649 { gen_helper_sve_ldff1bb_r_mte, 4650 gen_helper_sve_ldff1bhu_r_mte, 4651 gen_helper_sve_ldff1bsu_r_mte, 4652 gen_helper_sve_ldff1bdu_r_mte, 4653 4654 gen_helper_sve_ldff1sds_le_r_mte, 4655 gen_helper_sve_ldff1hh_le_r_mte, 4656 gen_helper_sve_ldff1hsu_le_r_mte, 4657 gen_helper_sve_ldff1hdu_le_r_mte, 4658 4659 gen_helper_sve_ldff1hds_le_r_mte, 4660 gen_helper_sve_ldff1hss_le_r_mte, 4661 gen_helper_sve_ldff1ss_le_r_mte, 4662 gen_helper_sve_ldff1sdu_le_r_mte, 4663 4664 gen_helper_sve_ldff1bds_r_mte, 4665 gen_helper_sve_ldff1bss_r_mte, 4666 gen_helper_sve_ldff1bhs_r_mte, 4667 gen_helper_sve_ldff1dd_le_r_mte }, 4668 4669 /* mte active, big-endian */ 4670 { gen_helper_sve_ldff1bb_r_mte, 4671 gen_helper_sve_ldff1bhu_r_mte, 4672 gen_helper_sve_ldff1bsu_r_mte, 4673 gen_helper_sve_ldff1bdu_r_mte, 4674 4675 gen_helper_sve_ldff1sds_be_r_mte, 4676 gen_helper_sve_ldff1hh_be_r_mte, 4677 gen_helper_sve_ldff1hsu_be_r_mte, 4678 gen_helper_sve_ldff1hdu_be_r_mte, 4679 4680 gen_helper_sve_ldff1hds_be_r_mte, 4681 gen_helper_sve_ldff1hss_be_r_mte, 4682 gen_helper_sve_ldff1ss_be_r_mte, 4683 
            gen_helper_sve_ldff1sdu_be_r_mte,

            gen_helper_sve_ldff1bds_r_mte,
            gen_helper_sve_ldff1bss_r_mte,
            gen_helper_sve_ldff1bhs_r_mte,
            gen_helper_sve_ldff1dd_be_r_mte } },
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (sve_access_check(s)) {
        TCGv_i64 addr = tcg_temp_new_i64();
        tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
        do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
                   fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
    }
    return true;
}

static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
{
    static gen_helper_gvec_mem * const fns[2][2][16] = {
        { /* mte inactive, little-endian */
          { gen_helper_sve_ldnf1bb_r,
            gen_helper_sve_ldnf1bhu_r,
            gen_helper_sve_ldnf1bsu_r,
            gen_helper_sve_ldnf1bdu_r,

            gen_helper_sve_ldnf1sds_le_r,
            gen_helper_sve_ldnf1hh_le_r,
            gen_helper_sve_ldnf1hsu_le_r,
            gen_helper_sve_ldnf1hdu_le_r,

            gen_helper_sve_ldnf1hds_le_r,
            gen_helper_sve_ldnf1hss_le_r,
            gen_helper_sve_ldnf1ss_le_r,
            gen_helper_sve_ldnf1sdu_le_r,

            gen_helper_sve_ldnf1bds_r,
            gen_helper_sve_ldnf1bss_r,
            gen_helper_sve_ldnf1bhs_r,
            gen_helper_sve_ldnf1dd_le_r },

          /* mte inactive, big-endian */
          { gen_helper_sve_ldnf1bb_r,
            gen_helper_sve_ldnf1bhu_r,
            gen_helper_sve_ldnf1bsu_r,
            gen_helper_sve_ldnf1bdu_r,

            gen_helper_sve_ldnf1sds_be_r,
            gen_helper_sve_ldnf1hh_be_r,
            gen_helper_sve_ldnf1hsu_be_r,
            gen_helper_sve_ldnf1hdu_be_r,

            gen_helper_sve_ldnf1hds_be_r,
            gen_helper_sve_ldnf1hss_be_r,
            gen_helper_sve_ldnf1ss_be_r,
            gen_helper_sve_ldnf1sdu_be_r,

            gen_helper_sve_ldnf1bds_r,
            gen_helper_sve_ldnf1bss_r,
            gen_helper_sve_ldnf1bhs_r,
            gen_helper_sve_ldnf1dd_be_r } },

        { /* mte active, little-endian */
          { gen_helper_sve_ldnf1bb_r_mte,
            gen_helper_sve_ldnf1bhu_r_mte,
            gen_helper_sve_ldnf1bsu_r_mte,
            gen_helper_sve_ldnf1bdu_r_mte,

            gen_helper_sve_ldnf1sds_le_r_mte,
            gen_helper_sve_ldnf1hh_le_r_mte,
            gen_helper_sve_ldnf1hsu_le_r_mte,
            gen_helper_sve_ldnf1hdu_le_r_mte,

            gen_helper_sve_ldnf1hds_le_r_mte,
            gen_helper_sve_ldnf1hss_le_r_mte,
            gen_helper_sve_ldnf1ss_le_r_mte,
            gen_helper_sve_ldnf1sdu_le_r_mte,

            gen_helper_sve_ldnf1bds_r_mte,
            gen_helper_sve_ldnf1bss_r_mte,
            gen_helper_sve_ldnf1bhs_r_mte,
            gen_helper_sve_ldnf1dd_le_r_mte },

          /* mte active, big-endian */
          { gen_helper_sve_ldnf1bb_r_mte,
            gen_helper_sve_ldnf1bhu_r_mte,
            gen_helper_sve_ldnf1bsu_r_mte,
            gen_helper_sve_ldnf1bdu_r_mte,

            gen_helper_sve_ldnf1sds_be_r_mte,
            gen_helper_sve_ldnf1hh_be_r_mte,
            gen_helper_sve_ldnf1hsu_be_r_mte,
            gen_helper_sve_ldnf1hdu_be_r_mte,

            gen_helper_sve_ldnf1hds_be_r_mte,
            gen_helper_sve_ldnf1hss_be_r_mte,
            gen_helper_sve_ldnf1ss_be_r_mte,
            gen_helper_sve_ldnf1sdu_be_r_mte,

            gen_helper_sve_ldnf1bds_r_mte,
            gen_helper_sve_ldnf1bss_r_mte,
            gen_helper_sve_ldnf1bhs_r_mte,
            gen_helper_sve_ldnf1dd_be_r_mte } },
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (sve_access_check(s)) {
        int vsz = vec_full_reg_size(s);
        int elements = vsz >> dtype_esz[a->dtype];
        int off =
(a->imm * elements) << dtype_msz(a->dtype); 4801 TCGv_i64 addr = tcg_temp_new_i64(); 4802 4803 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off); 4804 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, 4805 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]); 4806 } 4807 return true; 4808 } 4809 4810 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) 4811 { 4812 unsigned vsz = vec_full_reg_size(s); 4813 TCGv_ptr t_pg; 4814 int poff; 4815 4816 /* Load the first quadword using the normal predicated load helpers. */ 4817 poff = pred_full_reg_offset(s, pg); 4818 if (vsz > 16) { 4819 /* 4820 * Zero-extend the first 16 bits of the predicate into a temporary. 4821 * This avoids triggering an assert making sure we don't have bits 4822 * set within a predicate beyond VQ, but we have lowered VQ to 1 4823 * for this load operation. 4824 */ 4825 TCGv_i64 tmp = tcg_temp_new_i64(); 4826 #if HOST_BIG_ENDIAN 4827 poff += 6; 4828 #endif 4829 tcg_gen_ld16u_i64(tmp, cpu_env, poff); 4830 4831 poff = offsetof(CPUARMState, vfp.preg_tmp); 4832 tcg_gen_st_i64(tmp, cpu_env, poff); 4833 } 4834 4835 t_pg = tcg_temp_new_ptr(); 4836 tcg_gen_addi_ptr(t_pg, cpu_env, poff); 4837 4838 gen_helper_gvec_mem *fn 4839 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; 4840 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt))); 4841 4842 /* Replicate that first quadword. */ 4843 if (vsz > 16) { 4844 int doff = vec_full_reg_offset(s, zt); 4845 tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16); 4846 } 4847 } 4848 4849 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) 4850 { 4851 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 4852 return false; 4853 } 4854 if (sve_access_check(s)) { 4855 int msz = dtype_msz(a->dtype); 4856 TCGv_i64 addr = tcg_temp_new_i64(); 4857 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz); 4858 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4859 do_ldrq(s, a->rd, a->pg, addr, a->dtype); 4860 } 4861 return true; 4862 } 4863 4864 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) 4865 { 4866 if (!dc_isar_feature(aa64_sve, s)) { 4867 return false; 4868 } 4869 if (sve_access_check(s)) { 4870 TCGv_i64 addr = tcg_temp_new_i64(); 4871 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16); 4872 do_ldrq(s, a->rd, a->pg, addr, a->dtype); 4873 } 4874 return true; 4875 } 4876 4877 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) 4878 { 4879 unsigned vsz = vec_full_reg_size(s); 4880 unsigned vsz_r32; 4881 TCGv_ptr t_pg; 4882 int poff, doff; 4883 4884 if (vsz < 32) { 4885 /* 4886 * Note that this UNDEFINED check comes after CheckSVEEnabled() 4887 * in the ARM pseudocode, which is the sve_access_check() done 4888 * in our caller. We should not now return false from the caller. 4889 */ 4890 unallocated_encoding(s); 4891 return; 4892 } 4893 4894 /* Load the first octaword using the normal predicated load helpers. */ 4895 4896 poff = pred_full_reg_offset(s, pg); 4897 if (vsz > 32) { 4898 /* 4899 * Zero-extend the first 32 bits of the predicate into a temporary. 4900 * This avoids triggering an assert making sure we don't have bits 4901 * set within a predicate beyond VQ, but we have lowered VQ to 2 4902 * for this load operation. 
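         * The zero-extended copy is staged through vfp.preg_tmp so
         * that the helper sees no bits set beyond the first 32
         * predicate bits.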
4903 */ 4904 TCGv_i64 tmp = tcg_temp_new_i64(); 4905 #if HOST_BIG_ENDIAN 4906 poff += 4; 4907 #endif 4908 tcg_gen_ld32u_i64(tmp, cpu_env, poff); 4909 4910 poff = offsetof(CPUARMState, vfp.preg_tmp); 4911 tcg_gen_st_i64(tmp, cpu_env, poff); 4912 } 4913 4914 t_pg = tcg_temp_new_ptr(); 4915 tcg_gen_addi_ptr(t_pg, cpu_env, poff); 4916 4917 gen_helper_gvec_mem *fn 4918 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; 4919 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt))); 4920 4921 /* 4922 * Replicate that first octaword. 4923 * The replication happens in units of 32; if the full vector size 4924 * is not a multiple of 32, the final bits are zeroed. 4925 */ 4926 doff = vec_full_reg_offset(s, zt); 4927 vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32); 4928 if (vsz >= 64) { 4929 tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32); 4930 } 4931 vsz -= vsz_r32; 4932 if (vsz) { 4933 tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0); 4934 } 4935 } 4936 4937 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) 4938 { 4939 if (!dc_isar_feature(aa64_sve_f64mm, s)) { 4940 return false; 4941 } 4942 if (a->rm == 31) { 4943 return false; 4944 } 4945 s->is_nonstreaming = true; 4946 if (sve_access_check(s)) { 4947 TCGv_i64 addr = tcg_temp_new_i64(); 4948 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); 4949 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4950 do_ldro(s, a->rd, a->pg, addr, a->dtype); 4951 } 4952 return true; 4953 } 4954 4955 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) 4956 { 4957 if (!dc_isar_feature(aa64_sve_f64mm, s)) { 4958 return false; 4959 } 4960 s->is_nonstreaming = true; 4961 if (sve_access_check(s)) { 4962 TCGv_i64 addr = tcg_temp_new_i64(); 4963 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); 4964 do_ldro(s, a->rd, a->pg, addr, a->dtype); 4965 } 4966 return true; 4967 } 4968 4969 /* Load and broadcast element. */ 4970 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) 4971 { 4972 unsigned vsz = vec_full_reg_size(s); 4973 unsigned psz = pred_full_reg_size(s); 4974 unsigned esz = dtype_esz[a->dtype]; 4975 unsigned msz = dtype_msz(a->dtype); 4976 TCGLabel *over; 4977 TCGv_i64 temp, clean_addr; 4978 4979 if (!dc_isar_feature(aa64_sve, s)) { 4980 return false; 4981 } 4982 if (!sve_access_check(s)) { 4983 return true; 4984 } 4985 4986 over = gen_new_label(); 4987 4988 /* If the guarding predicate has no bits set, no load occurs. */ 4989 if (psz <= 8) { 4990 /* Reduce the pred_esz_masks value simply to reduce the 4991 * size of the code generated here. 4992 */ 4993 uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8); 4994 temp = tcg_temp_new_i64(); 4995 tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg)); 4996 tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask); 4997 tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over); 4998 } else { 4999 TCGv_i32 t32 = tcg_temp_new_i32(); 5000 find_last_active(s, t32, esz, a->pg); 5001 tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over); 5002 } 5003 5004 /* Load the data. */ 5005 temp = tcg_temp_new_i64(); 5006 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz); 5007 clean_addr = gen_mte_check1(s, temp, false, true, msz); 5008 5009 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s), 5010 finalize_memop(s, dtype_mop[a->dtype])); 5011 5012 /* Broadcast to *all* elements. */ 5013 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), 5014 vsz, vsz, temp); 5015 5016 /* Zero the inactive elements. 
*/ 5017 gen_set_label(over); 5018 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false); 5019 } 5020 5021 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, 5022 int msz, int esz, int nreg) 5023 { 5024 static gen_helper_gvec_mem * const fn_single[2][2][4][4] = { 5025 { { { gen_helper_sve_st1bb_r, 5026 gen_helper_sve_st1bh_r, 5027 gen_helper_sve_st1bs_r, 5028 gen_helper_sve_st1bd_r }, 5029 { NULL, 5030 gen_helper_sve_st1hh_le_r, 5031 gen_helper_sve_st1hs_le_r, 5032 gen_helper_sve_st1hd_le_r }, 5033 { NULL, NULL, 5034 gen_helper_sve_st1ss_le_r, 5035 gen_helper_sve_st1sd_le_r }, 5036 { NULL, NULL, NULL, 5037 gen_helper_sve_st1dd_le_r } }, 5038 { { gen_helper_sve_st1bb_r, 5039 gen_helper_sve_st1bh_r, 5040 gen_helper_sve_st1bs_r, 5041 gen_helper_sve_st1bd_r }, 5042 { NULL, 5043 gen_helper_sve_st1hh_be_r, 5044 gen_helper_sve_st1hs_be_r, 5045 gen_helper_sve_st1hd_be_r }, 5046 { NULL, NULL, 5047 gen_helper_sve_st1ss_be_r, 5048 gen_helper_sve_st1sd_be_r }, 5049 { NULL, NULL, NULL, 5050 gen_helper_sve_st1dd_be_r } } }, 5051 5052 { { { gen_helper_sve_st1bb_r_mte, 5053 gen_helper_sve_st1bh_r_mte, 5054 gen_helper_sve_st1bs_r_mte, 5055 gen_helper_sve_st1bd_r_mte }, 5056 { NULL, 5057 gen_helper_sve_st1hh_le_r_mte, 5058 gen_helper_sve_st1hs_le_r_mte, 5059 gen_helper_sve_st1hd_le_r_mte }, 5060 { NULL, NULL, 5061 gen_helper_sve_st1ss_le_r_mte, 5062 gen_helper_sve_st1sd_le_r_mte }, 5063 { NULL, NULL, NULL, 5064 gen_helper_sve_st1dd_le_r_mte } }, 5065 { { gen_helper_sve_st1bb_r_mte, 5066 gen_helper_sve_st1bh_r_mte, 5067 gen_helper_sve_st1bs_r_mte, 5068 gen_helper_sve_st1bd_r_mte }, 5069 { NULL, 5070 gen_helper_sve_st1hh_be_r_mte, 5071 gen_helper_sve_st1hs_be_r_mte, 5072 gen_helper_sve_st1hd_be_r_mte }, 5073 { NULL, NULL, 5074 gen_helper_sve_st1ss_be_r_mte, 5075 gen_helper_sve_st1sd_be_r_mte }, 5076 { NULL, NULL, NULL, 5077 gen_helper_sve_st1dd_be_r_mte } } }, 5078 }; 5079 static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = { 5080 { { { gen_helper_sve_st2bb_r, 5081 gen_helper_sve_st2hh_le_r, 5082 gen_helper_sve_st2ss_le_r, 5083 gen_helper_sve_st2dd_le_r }, 5084 { gen_helper_sve_st3bb_r, 5085 gen_helper_sve_st3hh_le_r, 5086 gen_helper_sve_st3ss_le_r, 5087 gen_helper_sve_st3dd_le_r }, 5088 { gen_helper_sve_st4bb_r, 5089 gen_helper_sve_st4hh_le_r, 5090 gen_helper_sve_st4ss_le_r, 5091 gen_helper_sve_st4dd_le_r } }, 5092 { { gen_helper_sve_st2bb_r, 5093 gen_helper_sve_st2hh_be_r, 5094 gen_helper_sve_st2ss_be_r, 5095 gen_helper_sve_st2dd_be_r }, 5096 { gen_helper_sve_st3bb_r, 5097 gen_helper_sve_st3hh_be_r, 5098 gen_helper_sve_st3ss_be_r, 5099 gen_helper_sve_st3dd_be_r }, 5100 { gen_helper_sve_st4bb_r, 5101 gen_helper_sve_st4hh_be_r, 5102 gen_helper_sve_st4ss_be_r, 5103 gen_helper_sve_st4dd_be_r } } }, 5104 { { { gen_helper_sve_st2bb_r_mte, 5105 gen_helper_sve_st2hh_le_r_mte, 5106 gen_helper_sve_st2ss_le_r_mte, 5107 gen_helper_sve_st2dd_le_r_mte }, 5108 { gen_helper_sve_st3bb_r_mte, 5109 gen_helper_sve_st3hh_le_r_mte, 5110 gen_helper_sve_st3ss_le_r_mte, 5111 gen_helper_sve_st3dd_le_r_mte }, 5112 { gen_helper_sve_st4bb_r_mte, 5113 gen_helper_sve_st4hh_le_r_mte, 5114 gen_helper_sve_st4ss_le_r_mte, 5115 gen_helper_sve_st4dd_le_r_mte } }, 5116 { { gen_helper_sve_st2bb_r_mte, 5117 gen_helper_sve_st2hh_be_r_mte, 5118 gen_helper_sve_st2ss_be_r_mte, 5119 gen_helper_sve_st2dd_be_r_mte }, 5120 { gen_helper_sve_st3bb_r_mte, 5121 gen_helper_sve_st3hh_be_r_mte, 5122 gen_helper_sve_st3ss_be_r_mte, 5123 gen_helper_sve_st3dd_be_r_mte }, 5124 { gen_helper_sve_st4bb_r_mte, 5125 
gen_helper_sve_st4hh_be_r_mte, 5126 gen_helper_sve_st4ss_be_r_mte, 5127 gen_helper_sve_st4dd_be_r_mte } } }, 5128 }; 5129 gen_helper_gvec_mem *fn; 5130 int be = s->be_data == MO_BE; 5131 5132 if (nreg == 0) { 5133 /* ST1 */ 5134 fn = fn_single[s->mte_active[0]][be][msz][esz]; 5135 nreg = 1; 5136 } else { 5137 /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */ 5138 assert(msz == esz); 5139 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz]; 5140 } 5141 assert(fn != NULL); 5142 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn); 5143 } 5144 5145 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) 5146 { 5147 if (!dc_isar_feature(aa64_sve, s)) { 5148 return false; 5149 } 5150 if (a->rm == 31 || a->msz > a->esz) { 5151 return false; 5152 } 5153 if (sve_access_check(s)) { 5154 TCGv_i64 addr = tcg_temp_new_i64(); 5155 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz); 5156 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 5157 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); 5158 } 5159 return true; 5160 } 5161 5162 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) 5163 { 5164 if (!dc_isar_feature(aa64_sve, s)) { 5165 return false; 5166 } 5167 if (a->msz > a->esz) { 5168 return false; 5169 } 5170 if (sve_access_check(s)) { 5171 int vsz = vec_full_reg_size(s); 5172 int elements = vsz >> a->esz; 5173 TCGv_i64 addr = tcg_temp_new_i64(); 5174 5175 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), 5176 (a->imm * elements * (a->nreg + 1)) << a->msz); 5177 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); 5178 } 5179 return true; 5180 } 5181 5182 /* 5183 *** SVE gather loads / scatter stores 5184 */ 5185 5186 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, 5187 int scale, TCGv_i64 scalar, int msz, bool is_write, 5188 gen_helper_gvec_mem_scatter *fn) 5189 { 5190 unsigned vsz = vec_full_reg_size(s); 5191 TCGv_ptr t_zm = tcg_temp_new_ptr(); 5192 TCGv_ptr t_pg = tcg_temp_new_ptr(); 5193 TCGv_ptr t_zt = tcg_temp_new_ptr(); 5194 int desc = 0; 5195 5196 if (s->mte_active[0]) { 5197 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); 5198 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); 5199 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); 5200 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); 5201 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1); 5202 desc <<= SVE_MTEDESC_SHIFT; 5203 } 5204 desc = simd_desc(vsz, vsz, desc | scale); 5205 5206 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg)); 5207 tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm)); 5208 tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt)); 5209 fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); 5210 } 5211 5212 /* Indexed by [mte][be][ff][xs][u][msz]. 
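 * ff selects the first-fault variant, xs the offset extension
 * (unsigned vs signed 32-bit elements of zm), u the zero- vs
 * sign-extension of the loaded data, and msz the memory element size.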
*/ 5213 static gen_helper_gvec_mem_scatter * const 5214 gather_load_fn32[2][2][2][2][2][3] = { 5215 { /* MTE Inactive */ 5216 { /* Little-endian */ 5217 { { { gen_helper_sve_ldbss_zsu, 5218 gen_helper_sve_ldhss_le_zsu, 5219 NULL, }, 5220 { gen_helper_sve_ldbsu_zsu, 5221 gen_helper_sve_ldhsu_le_zsu, 5222 gen_helper_sve_ldss_le_zsu, } }, 5223 { { gen_helper_sve_ldbss_zss, 5224 gen_helper_sve_ldhss_le_zss, 5225 NULL, }, 5226 { gen_helper_sve_ldbsu_zss, 5227 gen_helper_sve_ldhsu_le_zss, 5228 gen_helper_sve_ldss_le_zss, } } }, 5229 5230 /* First-fault */ 5231 { { { gen_helper_sve_ldffbss_zsu, 5232 gen_helper_sve_ldffhss_le_zsu, 5233 NULL, }, 5234 { gen_helper_sve_ldffbsu_zsu, 5235 gen_helper_sve_ldffhsu_le_zsu, 5236 gen_helper_sve_ldffss_le_zsu, } }, 5237 { { gen_helper_sve_ldffbss_zss, 5238 gen_helper_sve_ldffhss_le_zss, 5239 NULL, }, 5240 { gen_helper_sve_ldffbsu_zss, 5241 gen_helper_sve_ldffhsu_le_zss, 5242 gen_helper_sve_ldffss_le_zss, } } } }, 5243 5244 { /* Big-endian */ 5245 { { { gen_helper_sve_ldbss_zsu, 5246 gen_helper_sve_ldhss_be_zsu, 5247 NULL, }, 5248 { gen_helper_sve_ldbsu_zsu, 5249 gen_helper_sve_ldhsu_be_zsu, 5250 gen_helper_sve_ldss_be_zsu, } }, 5251 { { gen_helper_sve_ldbss_zss, 5252 gen_helper_sve_ldhss_be_zss, 5253 NULL, }, 5254 { gen_helper_sve_ldbsu_zss, 5255 gen_helper_sve_ldhsu_be_zss, 5256 gen_helper_sve_ldss_be_zss, } } }, 5257 5258 /* First-fault */ 5259 { { { gen_helper_sve_ldffbss_zsu, 5260 gen_helper_sve_ldffhss_be_zsu, 5261 NULL, }, 5262 { gen_helper_sve_ldffbsu_zsu, 5263 gen_helper_sve_ldffhsu_be_zsu, 5264 gen_helper_sve_ldffss_be_zsu, } }, 5265 { { gen_helper_sve_ldffbss_zss, 5266 gen_helper_sve_ldffhss_be_zss, 5267 NULL, }, 5268 { gen_helper_sve_ldffbsu_zss, 5269 gen_helper_sve_ldffhsu_be_zss, 5270 gen_helper_sve_ldffss_be_zss, } } } } }, 5271 { /* MTE Active */ 5272 { /* Little-endian */ 5273 { { { gen_helper_sve_ldbss_zsu_mte, 5274 gen_helper_sve_ldhss_le_zsu_mte, 5275 NULL, }, 5276 { gen_helper_sve_ldbsu_zsu_mte, 5277 gen_helper_sve_ldhsu_le_zsu_mte, 5278 gen_helper_sve_ldss_le_zsu_mte, } }, 5279 { { gen_helper_sve_ldbss_zss_mte, 5280 gen_helper_sve_ldhss_le_zss_mte, 5281 NULL, }, 5282 { gen_helper_sve_ldbsu_zss_mte, 5283 gen_helper_sve_ldhsu_le_zss_mte, 5284 gen_helper_sve_ldss_le_zss_mte, } } }, 5285 5286 /* First-fault */ 5287 { { { gen_helper_sve_ldffbss_zsu_mte, 5288 gen_helper_sve_ldffhss_le_zsu_mte, 5289 NULL, }, 5290 { gen_helper_sve_ldffbsu_zsu_mte, 5291 gen_helper_sve_ldffhsu_le_zsu_mte, 5292 gen_helper_sve_ldffss_le_zsu_mte, } }, 5293 { { gen_helper_sve_ldffbss_zss_mte, 5294 gen_helper_sve_ldffhss_le_zss_mte, 5295 NULL, }, 5296 { gen_helper_sve_ldffbsu_zss_mte, 5297 gen_helper_sve_ldffhsu_le_zss_mte, 5298 gen_helper_sve_ldffss_le_zss_mte, } } } }, 5299 5300 { /* Big-endian */ 5301 { { { gen_helper_sve_ldbss_zsu_mte, 5302 gen_helper_sve_ldhss_be_zsu_mte, 5303 NULL, }, 5304 { gen_helper_sve_ldbsu_zsu_mte, 5305 gen_helper_sve_ldhsu_be_zsu_mte, 5306 gen_helper_sve_ldss_be_zsu_mte, } }, 5307 { { gen_helper_sve_ldbss_zss_mte, 5308 gen_helper_sve_ldhss_be_zss_mte, 5309 NULL, }, 5310 { gen_helper_sve_ldbsu_zss_mte, 5311 gen_helper_sve_ldhsu_be_zss_mte, 5312 gen_helper_sve_ldss_be_zss_mte, } } }, 5313 5314 /* First-fault */ 5315 { { { gen_helper_sve_ldffbss_zsu_mte, 5316 gen_helper_sve_ldffhss_be_zsu_mte, 5317 NULL, }, 5318 { gen_helper_sve_ldffbsu_zsu_mte, 5319 gen_helper_sve_ldffhsu_be_zsu_mte, 5320 gen_helper_sve_ldffss_be_zsu_mte, } }, 5321 { { gen_helper_sve_ldffbss_zss_mte, 5322 gen_helper_sve_ldffhss_be_zss_mte, 5323 NULL, }, 5324 { 
gen_helper_sve_ldffbsu_zss_mte, 5325 gen_helper_sve_ldffhsu_be_zss_mte, 5326 gen_helper_sve_ldffss_be_zss_mte, } } } } }, 5327 }; 5328 5329 /* Note that we overload xs=2 to indicate 64-bit offset. */ 5330 static gen_helper_gvec_mem_scatter * const 5331 gather_load_fn64[2][2][2][3][2][4] = { 5332 { /* MTE Inactive */ 5333 { /* Little-endian */ 5334 { { { gen_helper_sve_ldbds_zsu, 5335 gen_helper_sve_ldhds_le_zsu, 5336 gen_helper_sve_ldsds_le_zsu, 5337 NULL, }, 5338 { gen_helper_sve_ldbdu_zsu, 5339 gen_helper_sve_ldhdu_le_zsu, 5340 gen_helper_sve_ldsdu_le_zsu, 5341 gen_helper_sve_lddd_le_zsu, } }, 5342 { { gen_helper_sve_ldbds_zss, 5343 gen_helper_sve_ldhds_le_zss, 5344 gen_helper_sve_ldsds_le_zss, 5345 NULL, }, 5346 { gen_helper_sve_ldbdu_zss, 5347 gen_helper_sve_ldhdu_le_zss, 5348 gen_helper_sve_ldsdu_le_zss, 5349 gen_helper_sve_lddd_le_zss, } }, 5350 { { gen_helper_sve_ldbds_zd, 5351 gen_helper_sve_ldhds_le_zd, 5352 gen_helper_sve_ldsds_le_zd, 5353 NULL, }, 5354 { gen_helper_sve_ldbdu_zd, 5355 gen_helper_sve_ldhdu_le_zd, 5356 gen_helper_sve_ldsdu_le_zd, 5357 gen_helper_sve_lddd_le_zd, } } }, 5358 5359 /* First-fault */ 5360 { { { gen_helper_sve_ldffbds_zsu, 5361 gen_helper_sve_ldffhds_le_zsu, 5362 gen_helper_sve_ldffsds_le_zsu, 5363 NULL, }, 5364 { gen_helper_sve_ldffbdu_zsu, 5365 gen_helper_sve_ldffhdu_le_zsu, 5366 gen_helper_sve_ldffsdu_le_zsu, 5367 gen_helper_sve_ldffdd_le_zsu, } }, 5368 { { gen_helper_sve_ldffbds_zss, 5369 gen_helper_sve_ldffhds_le_zss, 5370 gen_helper_sve_ldffsds_le_zss, 5371 NULL, }, 5372 { gen_helper_sve_ldffbdu_zss, 5373 gen_helper_sve_ldffhdu_le_zss, 5374 gen_helper_sve_ldffsdu_le_zss, 5375 gen_helper_sve_ldffdd_le_zss, } }, 5376 { { gen_helper_sve_ldffbds_zd, 5377 gen_helper_sve_ldffhds_le_zd, 5378 gen_helper_sve_ldffsds_le_zd, 5379 NULL, }, 5380 { gen_helper_sve_ldffbdu_zd, 5381 gen_helper_sve_ldffhdu_le_zd, 5382 gen_helper_sve_ldffsdu_le_zd, 5383 gen_helper_sve_ldffdd_le_zd, } } } }, 5384 { /* Big-endian */ 5385 { { { gen_helper_sve_ldbds_zsu, 5386 gen_helper_sve_ldhds_be_zsu, 5387 gen_helper_sve_ldsds_be_zsu, 5388 NULL, }, 5389 { gen_helper_sve_ldbdu_zsu, 5390 gen_helper_sve_ldhdu_be_zsu, 5391 gen_helper_sve_ldsdu_be_zsu, 5392 gen_helper_sve_lddd_be_zsu, } }, 5393 { { gen_helper_sve_ldbds_zss, 5394 gen_helper_sve_ldhds_be_zss, 5395 gen_helper_sve_ldsds_be_zss, 5396 NULL, }, 5397 { gen_helper_sve_ldbdu_zss, 5398 gen_helper_sve_ldhdu_be_zss, 5399 gen_helper_sve_ldsdu_be_zss, 5400 gen_helper_sve_lddd_be_zss, } }, 5401 { { gen_helper_sve_ldbds_zd, 5402 gen_helper_sve_ldhds_be_zd, 5403 gen_helper_sve_ldsds_be_zd, 5404 NULL, }, 5405 { gen_helper_sve_ldbdu_zd, 5406 gen_helper_sve_ldhdu_be_zd, 5407 gen_helper_sve_ldsdu_be_zd, 5408 gen_helper_sve_lddd_be_zd, } } }, 5409 5410 /* First-fault */ 5411 { { { gen_helper_sve_ldffbds_zsu, 5412 gen_helper_sve_ldffhds_be_zsu, 5413 gen_helper_sve_ldffsds_be_zsu, 5414 NULL, }, 5415 { gen_helper_sve_ldffbdu_zsu, 5416 gen_helper_sve_ldffhdu_be_zsu, 5417 gen_helper_sve_ldffsdu_be_zsu, 5418 gen_helper_sve_ldffdd_be_zsu, } }, 5419 { { gen_helper_sve_ldffbds_zss, 5420 gen_helper_sve_ldffhds_be_zss, 5421 gen_helper_sve_ldffsds_be_zss, 5422 NULL, }, 5423 { gen_helper_sve_ldffbdu_zss, 5424 gen_helper_sve_ldffhdu_be_zss, 5425 gen_helper_sve_ldffsdu_be_zss, 5426 gen_helper_sve_ldffdd_be_zss, } }, 5427 { { gen_helper_sve_ldffbds_zd, 5428 gen_helper_sve_ldffhds_be_zd, 5429 gen_helper_sve_ldffsds_be_zd, 5430 NULL, }, 5431 { gen_helper_sve_ldffbdu_zd, 5432 gen_helper_sve_ldffhdu_be_zd, 5433 gen_helper_sve_ldffsdu_be_zd, 5434 
gen_helper_sve_ldffdd_be_zd, } } } } }, 5435 { /* MTE Active */ 5436 { /* Little-endian */ 5437 { { { gen_helper_sve_ldbds_zsu_mte, 5438 gen_helper_sve_ldhds_le_zsu_mte, 5439 gen_helper_sve_ldsds_le_zsu_mte, 5440 NULL, }, 5441 { gen_helper_sve_ldbdu_zsu_mte, 5442 gen_helper_sve_ldhdu_le_zsu_mte, 5443 gen_helper_sve_ldsdu_le_zsu_mte, 5444 gen_helper_sve_lddd_le_zsu_mte, } }, 5445 { { gen_helper_sve_ldbds_zss_mte, 5446 gen_helper_sve_ldhds_le_zss_mte, 5447 gen_helper_sve_ldsds_le_zss_mte, 5448 NULL, }, 5449 { gen_helper_sve_ldbdu_zss_mte, 5450 gen_helper_sve_ldhdu_le_zss_mte, 5451 gen_helper_sve_ldsdu_le_zss_mte, 5452 gen_helper_sve_lddd_le_zss_mte, } }, 5453 { { gen_helper_sve_ldbds_zd_mte, 5454 gen_helper_sve_ldhds_le_zd_mte, 5455 gen_helper_sve_ldsds_le_zd_mte, 5456 NULL, }, 5457 { gen_helper_sve_ldbdu_zd_mte, 5458 gen_helper_sve_ldhdu_le_zd_mte, 5459 gen_helper_sve_ldsdu_le_zd_mte, 5460 gen_helper_sve_lddd_le_zd_mte, } } }, 5461 5462 /* First-fault */ 5463 { { { gen_helper_sve_ldffbds_zsu_mte, 5464 gen_helper_sve_ldffhds_le_zsu_mte, 5465 gen_helper_sve_ldffsds_le_zsu_mte, 5466 NULL, }, 5467 { gen_helper_sve_ldffbdu_zsu_mte, 5468 gen_helper_sve_ldffhdu_le_zsu_mte, 5469 gen_helper_sve_ldffsdu_le_zsu_mte, 5470 gen_helper_sve_ldffdd_le_zsu_mte, } }, 5471 { { gen_helper_sve_ldffbds_zss_mte, 5472 gen_helper_sve_ldffhds_le_zss_mte, 5473 gen_helper_sve_ldffsds_le_zss_mte, 5474 NULL, }, 5475 { gen_helper_sve_ldffbdu_zss_mte, 5476 gen_helper_sve_ldffhdu_le_zss_mte, 5477 gen_helper_sve_ldffsdu_le_zss_mte, 5478 gen_helper_sve_ldffdd_le_zss_mte, } }, 5479 { { gen_helper_sve_ldffbds_zd_mte, 5480 gen_helper_sve_ldffhds_le_zd_mte, 5481 gen_helper_sve_ldffsds_le_zd_mte, 5482 NULL, }, 5483 { gen_helper_sve_ldffbdu_zd_mte, 5484 gen_helper_sve_ldffhdu_le_zd_mte, 5485 gen_helper_sve_ldffsdu_le_zd_mte, 5486 gen_helper_sve_ldffdd_le_zd_mte, } } } }, 5487 { /* Big-endian */ 5488 { { { gen_helper_sve_ldbds_zsu_mte, 5489 gen_helper_sve_ldhds_be_zsu_mte, 5490 gen_helper_sve_ldsds_be_zsu_mte, 5491 NULL, }, 5492 { gen_helper_sve_ldbdu_zsu_mte, 5493 gen_helper_sve_ldhdu_be_zsu_mte, 5494 gen_helper_sve_ldsdu_be_zsu_mte, 5495 gen_helper_sve_lddd_be_zsu_mte, } }, 5496 { { gen_helper_sve_ldbds_zss_mte, 5497 gen_helper_sve_ldhds_be_zss_mte, 5498 gen_helper_sve_ldsds_be_zss_mte, 5499 NULL, }, 5500 { gen_helper_sve_ldbdu_zss_mte, 5501 gen_helper_sve_ldhdu_be_zss_mte, 5502 gen_helper_sve_ldsdu_be_zss_mte, 5503 gen_helper_sve_lddd_be_zss_mte, } }, 5504 { { gen_helper_sve_ldbds_zd_mte, 5505 gen_helper_sve_ldhds_be_zd_mte, 5506 gen_helper_sve_ldsds_be_zd_mte, 5507 NULL, }, 5508 { gen_helper_sve_ldbdu_zd_mte, 5509 gen_helper_sve_ldhdu_be_zd_mte, 5510 gen_helper_sve_ldsdu_be_zd_mte, 5511 gen_helper_sve_lddd_be_zd_mte, } } }, 5512 5513 /* First-fault */ 5514 { { { gen_helper_sve_ldffbds_zsu_mte, 5515 gen_helper_sve_ldffhds_be_zsu_mte, 5516 gen_helper_sve_ldffsds_be_zsu_mte, 5517 NULL, }, 5518 { gen_helper_sve_ldffbdu_zsu_mte, 5519 gen_helper_sve_ldffhdu_be_zsu_mte, 5520 gen_helper_sve_ldffsdu_be_zsu_mte, 5521 gen_helper_sve_ldffdd_be_zsu_mte, } }, 5522 { { gen_helper_sve_ldffbds_zss_mte, 5523 gen_helper_sve_ldffhds_be_zss_mte, 5524 gen_helper_sve_ldffsds_be_zss_mte, 5525 NULL, }, 5526 { gen_helper_sve_ldffbdu_zss_mte, 5527 gen_helper_sve_ldffhdu_be_zss_mte, 5528 gen_helper_sve_ldffsdu_be_zss_mte, 5529 gen_helper_sve_ldffdd_be_zss_mte, } }, 5530 { { gen_helper_sve_ldffbds_zd_mte, 5531 gen_helper_sve_ldffhds_be_zd_mte, 5532 gen_helper_sve_ldffsds_be_zd_mte, 5533 NULL, }, 5534 { gen_helper_sve_ldffbdu_zd_mte, 5535 
static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn = NULL;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
        break;
    case MO_64:
        fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
        break;
    }
    assert(fn != NULL);

    do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
               cpu_reg_sp(s, a->rn), a->msz, false, fn);
    return true;
}

static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
{
    gen_helper_gvec_mem_scatter *fn = NULL;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
        break;
    case MO_64:
        fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
        break;
    }
    assert(fn != NULL);

    /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
     * by loading the immediate into the scalar parameter.
     */
    do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
               tcg_constant_i64(a->imm << a->msz), a->msz, false, fn);
    return true;
}

static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn = NULL;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz + !a->u) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = gather_load_fn32[mte][be][0][0][a->u][a->msz];
        break;
    case MO_64:
        fn = gather_load_fn64[mte][be][0][2][a->u][a->msz];
        break;
    }
    assert(fn != NULL);

    do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
               cpu_reg(s, a->rm), a->msz, false, fn);
    return true;
}
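/*
 * The scatter-store tables below need fewer dimensions than the
 * gather-load tables above: stores have no first-fault forms and no
 * sign/zero-extension choice, so the ff and u indices disappear.
 */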
/* Indexed by [mte][be][xs][msz].  */
static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
    { /* MTE Inactive */
        { /* Little-endian */
          { gen_helper_sve_stbs_zsu,
            gen_helper_sve_sths_le_zsu,
            gen_helper_sve_stss_le_zsu, },
          { gen_helper_sve_stbs_zss,
            gen_helper_sve_sths_le_zss,
            gen_helper_sve_stss_le_zss, } },
        { /* Big-endian */
          { gen_helper_sve_stbs_zsu,
            gen_helper_sve_sths_be_zsu,
            gen_helper_sve_stss_be_zsu, },
          { gen_helper_sve_stbs_zss,
            gen_helper_sve_sths_be_zss,
            gen_helper_sve_stss_be_zss, } } },
    { /* MTE Active */
        { /* Little-endian */
          { gen_helper_sve_stbs_zsu_mte,
            gen_helper_sve_sths_le_zsu_mte,
            gen_helper_sve_stss_le_zsu_mte, },
          { gen_helper_sve_stbs_zss_mte,
            gen_helper_sve_sths_le_zss_mte,
            gen_helper_sve_stss_le_zss_mte, } },
        { /* Big-endian */
          { gen_helper_sve_stbs_zsu_mte,
            gen_helper_sve_sths_be_zsu_mte,
            gen_helper_sve_stss_be_zsu_mte, },
          { gen_helper_sve_stbs_zss_mte,
            gen_helper_sve_sths_be_zss_mte,
            gen_helper_sve_stss_be_zss_mte, } } },
};

/* Note that we overload xs=2 to indicate 64-bit offset.  */
static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
    { /* MTE Inactive */
        { /* Little-endian */
          { gen_helper_sve_stbd_zsu,
            gen_helper_sve_sthd_le_zsu,
            gen_helper_sve_stsd_le_zsu,
            gen_helper_sve_stdd_le_zsu, },
          { gen_helper_sve_stbd_zss,
            gen_helper_sve_sthd_le_zss,
            gen_helper_sve_stsd_le_zss,
            gen_helper_sve_stdd_le_zss, },
          { gen_helper_sve_stbd_zd,
            gen_helper_sve_sthd_le_zd,
            gen_helper_sve_stsd_le_zd,
            gen_helper_sve_stdd_le_zd, } },
        { /* Big-endian */
          { gen_helper_sve_stbd_zsu,
            gen_helper_sve_sthd_be_zsu,
            gen_helper_sve_stsd_be_zsu,
            gen_helper_sve_stdd_be_zsu, },
          { gen_helper_sve_stbd_zss,
            gen_helper_sve_sthd_be_zss,
            gen_helper_sve_stsd_be_zss,
            gen_helper_sve_stdd_be_zss, },
          { gen_helper_sve_stbd_zd,
            gen_helper_sve_sthd_be_zd,
            gen_helper_sve_stsd_be_zd,
            gen_helper_sve_stdd_be_zd, } } },
    { /* MTE Active */
        { /* Little-endian */
          { gen_helper_sve_stbd_zsu_mte,
            gen_helper_sve_sthd_le_zsu_mte,
            gen_helper_sve_stsd_le_zsu_mte,
            gen_helper_sve_stdd_le_zsu_mte, },
          { gen_helper_sve_stbd_zss_mte,
            gen_helper_sve_sthd_le_zss_mte,
            gen_helper_sve_stsd_le_zss_mte,
            gen_helper_sve_stdd_le_zss_mte, },
          { gen_helper_sve_stbd_zd_mte,
            gen_helper_sve_sthd_le_zd_mte,
            gen_helper_sve_stsd_le_zd_mte,
            gen_helper_sve_stdd_le_zd_mte, } },
        { /* Big-endian */
          { gen_helper_sve_stbd_zsu_mte,
            gen_helper_sve_sthd_be_zsu_mte,
            gen_helper_sve_stsd_be_zsu_mte,
            gen_helper_sve_stdd_be_zsu_mte, },
          { gen_helper_sve_stbd_zss_mte,
            gen_helper_sve_sthd_be_zss_mte,
            gen_helper_sve_stsd_be_zss_mte,
            gen_helper_sve_stdd_be_zss_mte, },
          { gen_helper_sve_stbd_zd_mte,
            gen_helper_sve_sthd_be_zd_mte,
            gen_helper_sve_stsd_be_zd_mte,
            gen_helper_sve_stdd_be_zd_mte, } } },
};
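/*
 * For the scatter stores below, the element size must be at least the
 * memory size, and the scaled-offset forms additionally require
 * msz != 0: a scaled byte access would be a shift by zero and is not
 * a valid encoding.
 */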
static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;
    }
    switch (a->esz) {
    case MO_32:
        fn = scatter_store_fn32[mte][be][a->xs][a->msz];
        break;
    case MO_64:
        fn = scatter_store_fn64[mte][be][a->xs][a->msz];
        break;
    default:
        g_assert_not_reached();
    }
    do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
               cpu_reg_sp(s, a->rn), a->msz, true, fn);
    return true;
}

static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
{
    gen_helper_gvec_mem_scatter *fn = NULL;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = scatter_store_fn32[mte][be][0][a->msz];
        break;
    case MO_64:
        fn = scatter_store_fn64[mte][be][2][a->msz];
        break;
    }
    assert(fn != NULL);

    /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
     * by loading the immediate into the scalar parameter.
     */
    do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
               tcg_constant_i64(a->imm << a->msz), a->msz, true, fn);
    return true;
}

static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = scatter_store_fn32[mte][be][0][a->msz];
        break;
    case MO_64:
        fn = scatter_store_fn64[mte][be][2][a->msz];
        break;
    default:
        g_assert_not_reached();
    }

    do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
               cpu_reg(s, a->rm), a->msz, true, fn);
    return true;
}

/*
 * Prefetches
 */

static bool trans_PRF(DisasContext *s, arg_PRF *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    /* Prefetch is a nop within QEMU.  */
    (void)sve_access_check(s);
    return true;
}

static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
{
    if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    /* Prefetch is a nop within QEMU.  */
    (void)sve_access_check(s);
    return true;
}

static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    /* Prefetch is a nop within QEMU.  */
    s->is_nonstreaming = true;
    (void)sve_access_check(s);
    return true;
}
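/*
 * Even as nops, the three prefetch forms above must still perform the
 * SVE access check so that a disabled or trapped SVE unit raises the
 * architected exception; the (void) casts make the deliberately
 * ignored return value explicit.
 */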
5874 */ 5875 5876 TRANS_FEAT(MOVPRFX, aa64_sve, do_mov_z, a->rd, a->rn) 5877 TRANS_FEAT(MOVPRFX_m, aa64_sve, do_sel_z, a->rd, a->rn, a->rd, a->pg, a->esz) 5878 TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false) 5879 5880 /* 5881 * SVE2 Integer Multiply - Unpredicated 5882 */ 5883 5884 TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a) 5885 5886 static gen_helper_gvec_3 * const smulh_zzz_fns[4] = { 5887 gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h, 5888 gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d, 5889 }; 5890 TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5891 smulh_zzz_fns[a->esz], a, 0) 5892 5893 static gen_helper_gvec_3 * const umulh_zzz_fns[4] = { 5894 gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h, 5895 gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d, 5896 }; 5897 TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5898 umulh_zzz_fns[a->esz], a, 0) 5899 5900 TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5901 gen_helper_gvec_pmul_b, a, 0) 5902 5903 static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = { 5904 gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, 5905 gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, 5906 }; 5907 TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5908 sqdmulh_zzz_fns[a->esz], a, 0) 5909 5910 static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = { 5911 gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h, 5912 gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d, 5913 }; 5914 TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5915 sqrdmulh_zzz_fns[a->esz], a, 0) 5916 5917 /* 5918 * SVE2 Integer - Predicated 5919 */ 5920 5921 static gen_helper_gvec_4 * const sadlp_fns[4] = { 5922 NULL, gen_helper_sve2_sadalp_zpzz_h, 5923 gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d, 5924 }; 5925 TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, 5926 sadlp_fns[a->esz], a, 0) 5927 5928 static gen_helper_gvec_4 * const uadlp_fns[4] = { 5929 NULL, gen_helper_sve2_uadalp_zpzz_h, 5930 gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d, 5931 }; 5932 TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, 5933 uadlp_fns[a->esz], a, 0) 5934 5935 /* 5936 * SVE2 integer unary operations (predicated) 5937 */ 5938 5939 TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz, 5940 a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0) 5941 5942 TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz, 5943 a->esz == 2 ? 
DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl)
DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl)
DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl)

DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl)
DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl)
DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl)

DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd)
DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd)
DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub)

DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd)
DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd)
DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub)

DO_ZPZZ(ADDP, aa64_sve2, sve2_addp)
DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp)
DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp)
DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp)
DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp)

DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd)
DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd)
DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub)
DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub)
DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd)
DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd)

/*
 * SVE2 Widening Integer Arithmetic
 */
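/*
 * In the widening operations below, the trailing integer argument is a
 * two-bit top/bottom selector passed to the helper as data: bit 0
 * picks the top half of the first input, bit 1 the top half of the
 * second.  Hence SADDLB passes 0 (bottom/bottom), SADDLT passes 3
 * (top/top), and the mixed forms SADDLBT and SSUBLTB pass 2 and 1
 * respectively.
 */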
static gen_helper_gvec_3 * const saddl_fns[4] = {
    NULL, gen_helper_sve2_saddl_h,
    gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d,
};
TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           saddl_fns[a->esz], a, 0)
TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           saddl_fns[a->esz], a, 3)
TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
           saddl_fns[a->esz], a, 2)

static gen_helper_gvec_3 * const ssubl_fns[4] = {
    NULL, gen_helper_sve2_ssubl_h,
    gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d,
};
TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 0)
TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 3)
TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 2)
TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const sabdl_fns[4] = {
    NULL, gen_helper_sve2_sabdl_h,
    gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d,
};
TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           sabdl_fns[a->esz], a, 0)
TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           sabdl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const uaddl_fns[4] = {
    NULL, gen_helper_sve2_uaddl_h,
    gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d,
};
TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           uaddl_fns[a->esz], a, 0)
TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           uaddl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const usubl_fns[4] = {
    NULL, gen_helper_sve2_usubl_h,
    gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d,
};
TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           usubl_fns[a->esz], a, 0)
TRANS_FEAT(USUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           usubl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const uabdl_fns[4] = {
    NULL, gen_helper_sve2_uabdl_h,
    gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d,
};
TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           uabdl_fns[a->esz], a, 0)
TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           uabdl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const sqdmull_fns[4] = {
    NULL, gen_helper_sve2_sqdmull_zzz_h,
    gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d,
};
TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqdmull_fns[a->esz], a, 0)
TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqdmull_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const smull_fns[4] = {
    NULL, gen_helper_sve2_smull_zzz_h,
    gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d,
};
TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           smull_fns[a->esz], a, 0)
TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           smull_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const umull_fns[4] = {
    NULL, gen_helper_sve2_umull_zzz_h,
    gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d,
};
TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           umull_fns[a->esz], a, 0)
TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           umull_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const eoril_fns[4] = {
    gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
    gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
};
TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2)
TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1)

static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
        NULL, gen_helper_sve2_pmull_d,
    };

    if (a->esz == 0) {
        if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
            return false;
        }
        s->is_nonstreaming = true;
    } else if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
}

TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false)
TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true)

static gen_helper_gvec_3 * const saddw_fns[4] = {
    NULL, gen_helper_sve2_saddw_h,
    gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d,
};
TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0)
TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const ssubw_fns[4] = {
    NULL, gen_helper_sve2_ssubw_h,
    gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d,
};
TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0)
TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const uaddw_fns[4] = {
    NULL, gen_helper_sve2_uaddw_h,
    gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d,
};
TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0)
TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const usubw_fns[4] = {
    NULL, gen_helper_sve2_usubw_h,
    gen_helper_sve2_usubw_s, gen_helper_sve2_usubw_d,
};
TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0)
TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1)
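/*
 * For the SHLL expansions below, do_shll_tb packs the decode immediate
 * as (shift << 1) | top, with halfbits = 4 << vece.  As a worked
 * example, SSHLLT .H <- .B with shl == 2 takes each 16-bit lane,
 * arithmetically shifts it right by 8 to sign-extend the top byte,
 * then shifts left by 2.  The shl == halfbits case degenerates to a
 * single mask of the high half, and the unsigned variants likewise
 * special-case shl == 0 as a mask of the low half.
 */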
static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
{
    int top = imm & 1;
    int shl = imm >> 1;
    int halfbits = 4 << vece;

    if (top) {
        if (shl == halfbits) {
            TCGv_vec t = tcg_temp_new_vec_matching(d);
            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
            tcg_gen_and_vec(vece, d, n, t);
        } else {
            tcg_gen_sari_vec(vece, d, n, halfbits);
            tcg_gen_shli_vec(vece, d, d, shl);
        }
    } else {
        tcg_gen_shli_vec(vece, d, n, halfbits);
        tcg_gen_sari_vec(vece, d, d, halfbits - shl);
    }
}

static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
{
    int halfbits = 4 << vece;
    int top = imm & 1;
    int shl = imm >> 1;
    int shift;
    uint64_t mask;

    mask = MAKE_64BIT_MASK(0, halfbits);
    mask <<= shl;
    mask = dup_const(vece, mask);

    shift = shl - top * halfbits;
    if (shift < 0) {
        tcg_gen_shri_i64(d, n, -shift);
    } else {
        tcg_gen_shli_i64(d, n, shift);
    }
    tcg_gen_andi_i64(d, d, mask);
}

static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
{
    gen_ushll_i64(MO_16, d, n, imm);
}

static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
{
    gen_ushll_i64(MO_32, d, n, imm);
}

static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
{
    gen_ushll_i64(MO_64, d, n, imm);
}

static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
{
    int halfbits = 4 << vece;
    int top = imm & 1;
    int shl = imm >> 1;

    if (top) {
        if (shl == halfbits) {
            TCGv_vec t = tcg_temp_new_vec_matching(d);
            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
            tcg_gen_and_vec(vece, d, n, t);
        } else {
            tcg_gen_shri_vec(vece, d, n, halfbits);
            tcg_gen_shli_vec(vece, d, d, shl);
        }
    } else {
        if (shl == 0) {
            TCGv_vec t = tcg_temp_new_vec_matching(d);
            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
            tcg_gen_and_vec(vece, d, n, t);
        } else {
            tcg_gen_shli_vec(vece, d, n, halfbits);
            tcg_gen_shri_vec(vece, d, d, halfbits - shl);
        }
    }
}

static bool do_shll_tb(DisasContext *s, arg_rri_esz *a,
                       const GVecGen2i ops[3], bool sel)
{
    if (a->esz < 0 || a->esz > 2) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
                        vec_full_reg_offset(s, a->rn),
                        vsz, vsz, (a->imm << 1) | sel,
                        &ops[a->esz]);
    }
    return true;
}

static const TCGOpcode sshll_list[] = {
    INDEX_op_shli_vec, INDEX_op_sari_vec, 0
};
static const GVecGen2i sshll_ops[3] = {
    { .fniv = gen_sshll_vec,
      .opt_opc = sshll_list,
      .fno = gen_helper_sve2_sshll_h,
      .vece = MO_16 },
    { .fniv = gen_sshll_vec,
      .opt_opc = sshll_list,
      .fno = gen_helper_sve2_sshll_s,
      .vece = MO_32 },
    { .fniv = gen_sshll_vec,
      .opt_opc = sshll_list,
      .fno = gen_helper_sve2_sshll_d,
      .vece = MO_64 }
};
TRANS_FEAT(SSHLLB, aa64_sve2, do_shll_tb, a, sshll_ops, false)
TRANS_FEAT(SSHLLT, aa64_sve2, do_shll_tb, a, sshll_ops, true)
static const TCGOpcode ushll_list[] = {
    INDEX_op_shli_vec, INDEX_op_shri_vec, 0
};
static const GVecGen2i ushll_ops[3] = {
    { .fni8 = gen_ushll16_i64,
      .fniv = gen_ushll_vec,
      .opt_opc = ushll_list,
      .fno = gen_helper_sve2_ushll_h,
      .vece = MO_16 },
    { .fni8 = gen_ushll32_i64,
      .fniv = gen_ushll_vec,
      .opt_opc = ushll_list,
      .fno = gen_helper_sve2_ushll_s,
      .vece = MO_32 },
    { .fni8 = gen_ushll64_i64,
      .fniv = gen_ushll_vec,
      .opt_opc = ushll_list,
      .fno = gen_helper_sve2_ushll_d,
      .vece = MO_64 },
};
TRANS_FEAT(USHLLB, aa64_sve2, do_shll_tb, a, ushll_ops, false)
TRANS_FEAT(USHLLT, aa64_sve2, do_shll_tb, a, ushll_ops, true)

static gen_helper_gvec_3 * const bext_fns[4] = {
    gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
    gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
};
TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
                        bext_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const bdep_fns[4] = {
    gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
    gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
};
TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
                        bdep_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const bgrp_fns[4] = {
    gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
    gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
};
TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
                        bgrp_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const cadd_fns[4] = {
    gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
    gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d,
};
TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
           cadd_fns[a->esz], a, 0)
TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
           cadd_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const sqcadd_fns[4] = {
    gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
    gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d,
};
TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqcadd_fns[a->esz], a, 0)
TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqcadd_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const sabal_fns[4] = {
    NULL, gen_helper_sve2_sabal_h,
    gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d,
};
TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0)
TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const uabal_fns[4] = {
    NULL, gen_helper_sve2_uabal_h,
    gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d,
};
TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0)
TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1)

static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
{
    static gen_helper_gvec_4 * const fns[2] = {
        gen_helper_sve2_adcl_s,
        gen_helper_sve2_adcl_d,
    };
    /*
     * Note that in this case the ESZ field encodes both size and sign.
     * Split out 'subtract' into bit 1 of the data field for the helper.
     */
    return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel);
}
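/*
 * Illustrative decode of the esz overloading above: ADCLB .S arrives
 * with esz == 0 and selects gen_helper_sve2_adcl_s with the data
 * 'subtract' bit clear, while the subtracting SBCLB .S form arrives
 * with esz == 2, selecting the same 32-bit helper but with bit 1 of
 * data set.
 */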
TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false)
TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true)

TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a)
TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a)
TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a)
TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a)
TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a)
TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a)

TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a)
TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a)

static bool do_narrow_extract(DisasContext *s, arg_rri_esz *a,
                              const GVecGen2 ops[3])
{
    if (a->esz < 0 || a->esz > MO_32 || a->imm != 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
                       vec_full_reg_offset(s, a->rn),
                       vsz, vsz, &ops[a->esz]);
    }
    return true;
}
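/*
 * In the narrowing expansions below, vece is the *wide* element size
 * and halfbits the narrow width: e.g. for SQXTNB .H -> .B, vece is
 * MO_16 and halfbits is 8, so each lane is clamped to [-128, 127] and
 * its high byte cleared.  The top-half (_t) variants instead shift the
 * narrowed result left by halfbits and merge it over the destination
 * with tcg_gen_bitsel_vec() and a low-half mask, which is why they set
 * .load_dest in their GVecGen2 descriptions.
 */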
static const TCGOpcode sqxtn_list[] = {
    INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
};

static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t mask = (1ull << halfbits) - 1;
    int64_t min = -1ull << (halfbits - 1);
    int64_t max = -min - 1;

    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, d, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, d, d, t);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_and_vec(vece, d, d, t);
}

static const GVecGen2 sqxtnb_ops[3] = {
    { .fniv = gen_sqxtnb_vec,
      .opt_opc = sqxtn_list,
      .fno = gen_helper_sve2_sqxtnb_h,
      .vece = MO_16 },
    { .fniv = gen_sqxtnb_vec,
      .opt_opc = sqxtn_list,
      .fno = gen_helper_sve2_sqxtnb_s,
      .vece = MO_32 },
    { .fniv = gen_sqxtnb_vec,
      .opt_opc = sqxtn_list,
      .fno = gen_helper_sve2_sqxtnb_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQXTNB, aa64_sve2, do_narrow_extract, a, sqxtnb_ops)

static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t mask = (1ull << halfbits) - 1;
    int64_t min = -1ull << (halfbits - 1);
    int64_t max = -min - 1;

    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
}

static const GVecGen2 sqxtnt_ops[3] = {
    { .fniv = gen_sqxtnt_vec,
      .opt_opc = sqxtn_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqxtnt_h,
      .vece = MO_16 },
    { .fniv = gen_sqxtnt_vec,
      .opt_opc = sqxtn_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqxtnt_s,
      .vece = MO_32 },
    { .fniv = gen_sqxtnt_vec,
      .opt_opc = sqxtn_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqxtnt_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQXTNT, aa64_sve2, do_narrow_extract, a, sqxtnt_ops)

static const TCGOpcode uqxtn_list[] = {
    INDEX_op_shli_vec, INDEX_op_umin_vec, 0
};

static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, d, n, t);
}

static const GVecGen2 uqxtnb_ops[3] = {
    { .fniv = gen_uqxtnb_vec,
      .opt_opc = uqxtn_list,
      .fno = gen_helper_sve2_uqxtnb_h,
      .vece = MO_16 },
    { .fniv = gen_uqxtnb_vec,
      .opt_opc = uqxtn_list,
      .fno = gen_helper_sve2_uqxtnb_s,
      .vece = MO_32 },
    { .fniv = gen_uqxtnb_vec,
      .opt_opc = uqxtn_list,
      .fno = gen_helper_sve2_uqxtnb_d,
      .vece = MO_64 },
};
TRANS_FEAT(UQXTNB, aa64_sve2, do_narrow_extract, a, uqxtnb_ops)

static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
}

static const GVecGen2 uqxtnt_ops[3] = {
    { .fniv = gen_uqxtnt_vec,
      .opt_opc = uqxtn_list,
      .load_dest = true,
      .fno = gen_helper_sve2_uqxtnt_h,
      .vece = MO_16 },
    { .fniv = gen_uqxtnt_vec,
      .opt_opc = uqxtn_list,
      .load_dest = true,
      .fno = gen_helper_sve2_uqxtnt_s,
      .vece = MO_32 },
    { .fniv = gen_uqxtnt_vec,
      .opt_opc = uqxtn_list,
      .load_dest = true,
      .fno = gen_helper_sve2_uqxtnt_d,
      .vece = MO_64 },
};
TRANS_FEAT(UQXTNT, aa64_sve2, do_narrow_extract, a, uqxtnt_ops)

static const TCGOpcode sqxtun_list[] = {
    INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
};

static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, d, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, d, d, t);
}

static const GVecGen2 sqxtunb_ops[3] = {
    { .fniv = gen_sqxtunb_vec,
      .opt_opc = sqxtun_list,
      .fno = gen_helper_sve2_sqxtunb_h,
      .vece = MO_16 },
    { .fniv = gen_sqxtunb_vec,
      .opt_opc = sqxtun_list,
      .fno = gen_helper_sve2_sqxtunb_s,
      .vece = MO_32 },
    { .fniv = gen_sqxtunb_vec,
      .opt_opc = sqxtun_list,
      .fno = gen_helper_sve2_sqxtunb_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQXTUNB, aa64_sve2, do_narrow_extract, a, sqxtunb_ops)

static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
}

static const GVecGen2 sqxtunt_ops[3] = {
    { .fniv = gen_sqxtunt_vec,
      .opt_opc = sqxtun_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqxtunt_h,
      .vece = MO_16 },
    { .fniv = gen_sqxtunt_vec,
      .opt_opc = sqxtun_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqxtunt_s,
      .vece = MO_32 },
    { .fniv = gen_sqxtunt_vec,
      .opt_opc = sqxtun_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqxtunt_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQXTUNT, aa64_sve2, do_narrow_extract, a, sqxtunt_ops)
static bool do_shr_narrow(DisasContext *s, arg_rri_esz *a,
                          const GVecGen2i ops[3])
{
    if (a->esz < 0 || a->esz > MO_32) {
        return false;
    }
    assert(a->imm > 0 && a->imm <= (8 << a->esz));
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
                        vec_full_reg_offset(s, a->rn),
                        vsz, vsz, a->imm, &ops[a->esz]);
    }
    return true;
}
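/*
 * Each GVecGen2i below offers up to three lowerings: .fniv for hosts
 * with the required vector ops (.opt_opc), .fni8 operating on 64-bit
 * scalars where a plain shift-and-mask suffices, and .fno as the
 * out-of-line fallback.  For example, gen_shrnb16_i64 narrows .H -> .B
 * by shifting and then masking with
 * dup_const(MO_16, 0xff) == 0x00ff00ff00ff00ff; the 64-bit top form
 * instead deposits the narrowed 32-bit result directly into the high
 * half of the destination.
 */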
static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
{
    int halfbits = 4 << vece;
    uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));

    tcg_gen_shri_i64(d, n, shr);
    tcg_gen_andi_i64(d, d, mask);
}

static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnb_i64(MO_16, d, n, shr);
}

static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnb_i64(MO_32, d, n, shr);
}

static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnb_i64(MO_64, d, n, shr);
}

static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    uint64_t mask = MAKE_64BIT_MASK(0, halfbits);

    tcg_gen_shri_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_and_vec(vece, d, n, t);
}

static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 };
static const GVecGen2i shrnb_ops[3] = {
    { .fni8 = gen_shrnb16_i64,
      .fniv = gen_shrnb_vec,
      .opt_opc = shrnb_vec_list,
      .fno = gen_helper_sve2_shrnb_h,
      .vece = MO_16 },
    { .fni8 = gen_shrnb32_i64,
      .fniv = gen_shrnb_vec,
      .opt_opc = shrnb_vec_list,
      .fno = gen_helper_sve2_shrnb_s,
      .vece = MO_32 },
    { .fni8 = gen_shrnb64_i64,
      .fniv = gen_shrnb_vec,
      .opt_opc = shrnb_vec_list,
      .fno = gen_helper_sve2_shrnb_d,
      .vece = MO_64 },
};
TRANS_FEAT(SHRNB, aa64_sve2, do_shr_narrow, a, shrnb_ops)

static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
{
    int halfbits = 4 << vece;
    uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));

    tcg_gen_shli_i64(n, n, halfbits - shr);
    tcg_gen_andi_i64(n, n, ~mask);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_or_i64(d, d, n);
}

static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnt_i64(MO_16, d, n, shr);
}

static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnt_i64(MO_32, d, n, shr);
}

static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    tcg_gen_shri_i64(n, n, shr);
    tcg_gen_deposit_i64(d, d, n, 32, 32);
}

static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    uint64_t mask = MAKE_64BIT_MASK(0, halfbits);

    tcg_gen_shli_vec(vece, n, n, halfbits - shr);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
}

static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 };
static const GVecGen2i shrnt_ops[3] = {
    { .fni8 = gen_shrnt16_i64,
      .fniv = gen_shrnt_vec,
      .opt_opc = shrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_shrnt_h,
      .vece = MO_16 },
    { .fni8 = gen_shrnt32_i64,
      .fniv = gen_shrnt_vec,
      .opt_opc = shrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_shrnt_s,
      .vece = MO_32 },
    { .fni8 = gen_shrnt64_i64,
      .fniv = gen_shrnt_vec,
      .opt_opc = shrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_shrnt_d,
      .vece = MO_64 },
};
TRANS_FEAT(SHRNT, aa64_sve2, do_shr_narrow, a, shrnt_ops)

static const GVecGen2i rshrnb_ops[3] = {
    { .fno = gen_helper_sve2_rshrnb_h },
    { .fno = gen_helper_sve2_rshrnb_s },
    { .fno = gen_helper_sve2_rshrnb_d },
};
TRANS_FEAT(RSHRNB, aa64_sve2, do_shr_narrow, a, rshrnb_ops)

static const GVecGen2i rshrnt_ops[3] = {
    { .fno = gen_helper_sve2_rshrnt_h },
    { .fno = gen_helper_sve2_rshrnt_s },
    { .fno = gen_helper_sve2_rshrnt_d },
};
TRANS_FEAT(RSHRNT, aa64_sve2, do_shr_narrow, a, rshrnt_ops)

static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
                             TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, d, n, t);
}

static const TCGOpcode sqshrunb_vec_list[] = {
    INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
};
static const GVecGen2i sqshrunb_ops[3] = {
    { .fniv = gen_sqshrunb_vec,
      .opt_opc = sqshrunb_vec_list,
      .fno = gen_helper_sve2_sqshrunb_h,
      .vece = MO_16 },
    { .fniv = gen_sqshrunb_vec,
      .opt_opc = sqshrunb_vec_list,
      .fno = gen_helper_sve2_sqshrunb_s,
      .vece = MO_32 },
    { .fniv = gen_sqshrunb_vec,
      .opt_opc = sqshrunb_vec_list,
      .fno = gen_helper_sve2_sqshrunb_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQSHRUNB, aa64_sve2, do_shr_narrow, a, sqshrunb_ops)

static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
                             TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
}

static const TCGOpcode sqshrunt_vec_list[] = {
    INDEX_op_shli_vec, INDEX_op_sari_vec,
    INDEX_op_smax_vec, INDEX_op_umin_vec, 0
};
static const GVecGen2i sqshrunt_ops[3] = {
    { .fniv = gen_sqshrunt_vec,
      .opt_opc = sqshrunt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqshrunt_h,
      .vece = MO_16 },
    { .fniv = gen_sqshrunt_vec,
      .opt_opc = sqshrunt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqshrunt_s,
      .vece = MO_32 },
    { .fniv = gen_sqshrunt_vec,
      .opt_opc = sqshrunt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqshrunt_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQSHRUNT, aa64_sve2, do_shr_narrow, a, sqshrunt_ops)

static const GVecGen2i sqrshrunb_ops[3] = {
    { .fno = gen_helper_sve2_sqrshrunb_h },
    { .fno = gen_helper_sve2_sqrshrunb_s },
    { .fno = gen_helper_sve2_sqrshrunb_d },
};
TRANS_FEAT(SQRSHRUNB, aa64_sve2, do_shr_narrow, a, sqrshrunb_ops)

static const GVecGen2i sqrshrunt_ops[3] = {
    { .fno = gen_helper_sve2_sqrshrunt_h },
    { .fno = gen_helper_sve2_sqrshrunt_s },
    { .fno = gen_helper_sve2_sqrshrunt_d },
};
TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, a, sqrshrunt_ops)

static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
    int64_t min = -max - 1;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_and_vec(vece, d, n, t);
}

static const TCGOpcode sqshrnb_vec_list[] = {
    INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
};
static const GVecGen2i sqshrnb_ops[3] = {
    { .fniv = gen_sqshrnb_vec,
      .opt_opc = sqshrnb_vec_list,
      .fno = gen_helper_sve2_sqshrnb_h,
      .vece = MO_16 },
    { .fniv = gen_sqshrnb_vec,
      .opt_opc = sqshrnb_vec_list,
      .fno = gen_helper_sve2_sqshrnb_s,
      .vece = MO_32 },
    { .fniv = gen_sqshrnb_vec,
      .opt_opc = sqshrnb_vec_list,
      .fno = gen_helper_sve2_sqshrnb_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQSHRNB, aa64_sve2, do_shr_narrow, a, sqshrnb_ops)

static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
    int64_t min = -max - 1;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_bitsel_vec(vece, d, t, d, n);
}

static const TCGOpcode sqshrnt_vec_list[] = {
    INDEX_op_shli_vec, INDEX_op_sari_vec,
    INDEX_op_smax_vec, INDEX_op_smin_vec, 0
};
static const GVecGen2i sqshrnt_ops[3] = {
    { .fniv = gen_sqshrnt_vec,
      .opt_opc = sqshrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqshrnt_h,
      .vece = MO_16 },
    { .fniv = gen_sqshrnt_vec,
      .opt_opc = sqshrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqshrnt_s,
      .vece = MO_32 },
    { .fniv = gen_sqshrnt_vec,
      .opt_opc = sqshrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_sqshrnt_d,
      .vece = MO_64 },
};
TRANS_FEAT(SQSHRNT, aa64_sve2, do_shr_narrow, a, sqshrnt_ops)

static const GVecGen2i sqrshrnb_ops[3] = {
    { .fno = gen_helper_sve2_sqrshrnb_h },
    { .fno = gen_helper_sve2_sqrshrnb_s },
    { .fno = gen_helper_sve2_sqrshrnb_d },
};
TRANS_FEAT(SQRSHRNB, aa64_sve2, do_shr_narrow, a, sqrshrnb_ops)

static const GVecGen2i sqrshrnt_ops[3] = {
    { .fno = gen_helper_sve2_sqrshrnt_h },
    { .fno = gen_helper_sve2_sqrshrnt_s },
    { .fno = gen_helper_sve2_sqrshrnt_d },
};
TRANS_FEAT(SQRSHRNT, aa64_sve2, do_shr_narrow, a, sqrshrnt_ops)
static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_shri_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, d, n, t);
}

static const TCGOpcode uqshrnb_vec_list[] = {
    INDEX_op_shri_vec, INDEX_op_umin_vec, 0
};
static const GVecGen2i uqshrnb_ops[3] = {
    { .fniv = gen_uqshrnb_vec,
      .opt_opc = uqshrnb_vec_list,
      .fno = gen_helper_sve2_uqshrnb_h,
      .vece = MO_16 },
    { .fniv = gen_uqshrnb_vec,
      .opt_opc = uqshrnb_vec_list,
      .fno = gen_helper_sve2_uqshrnb_s,
      .vece = MO_32 },
    { .fniv = gen_uqshrnb_vec,
      .opt_opc = uqshrnb_vec_list,
      .fno = gen_helper_sve2_uqshrnb_d,
      .vece = MO_64 },
};
TRANS_FEAT(UQSHRNB, aa64_sve2, do_shr_narrow, a, uqshrnb_ops)

static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_shri_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
}

static const TCGOpcode uqshrnt_vec_list[] = {
    INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
};
static const GVecGen2i uqshrnt_ops[3] = {
    { .fniv = gen_uqshrnt_vec,
      .opt_opc = uqshrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_uqshrnt_h,
      .vece = MO_16 },
    { .fniv = gen_uqshrnt_vec,
      .opt_opc = uqshrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_uqshrnt_s,
      .vece = MO_32 },
    { .fniv = gen_uqshrnt_vec,
      .opt_opc = uqshrnt_vec_list,
      .load_dest = true,
      .fno = gen_helper_sve2_uqshrnt_d,
      .vece = MO_64 },
};
TRANS_FEAT(UQSHRNT, aa64_sve2, do_shr_narrow, a, uqshrnt_ops)

static const GVecGen2i uqrshrnb_ops[3] = {
    { .fno = gen_helper_sve2_uqrshrnb_h },
    { .fno = gen_helper_sve2_uqrshrnb_s },
    { .fno = gen_helper_sve2_uqrshrnb_d },
};
TRANS_FEAT(UQRSHRNB, aa64_sve2, do_shr_narrow, a, uqrshrnb_ops)

static const GVecGen2i uqrshrnt_ops[3] = {
    { .fno = gen_helper_sve2_uqrshrnt_h },
    { .fno = gen_helper_sve2_uqrshrnt_s },
    { .fno = gen_helper_sve2_uqrshrnt_d },
};
TRANS_FEAT(UQRSHRNT, aa64_sve2, do_shr_narrow, a, uqrshrnt_ops)

#define DO_SVE2_ZZZ_NARROW(NAME, name)                                    \
    static gen_helper_gvec_3 * const name##_fns[4] = {                    \
        NULL, gen_helper_sve2_##name##_h,                                 \
        gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d,           \
    };                                                                    \
    TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz,                     \
               name##_fns[a->esz], a, 0)

DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)

DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)

static gen_helper_gvec_flags_4 * const match_fns[4] = {
    gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
};
TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])

static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
    gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
};
TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
static gen_helper_gvec_4 * const histcnt_fns[4] = {
    NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
};
TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
                        histcnt_fns[a->esz], a, 0)

TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
                        a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)

DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz)
DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz)
DO_ZPZZ_FP(FMINNMP, aa64_sve2, sve2_fminnmp_zpzz)
DO_ZPZZ_FP(FMAXP, aa64_sve2, sve2_fmaxp_zpzz)
DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)

/*
 * SVE Integer Multiply-Add (unpredicated)
 */

TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
                        gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
                        0, FPST_FPCR)
TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
                        gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
                        0, FPST_FPCR)

static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
    NULL, gen_helper_sve2_sqdmlal_zzzw_h,
    gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
};
TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlal_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlal_zzzw_fns[a->esz], a, 3)
TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlal_zzzw_fns[a->esz], a, 2)

static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = {
    NULL, gen_helper_sve2_sqdmlsl_zzzw_h,
    gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d,
};
TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlsl_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlsl_zzzw_fns[a->esz], a, 3)
TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlsl_zzzw_fns[a->esz], a, 2)

static gen_helper_gvec_4 * const sqrdmlah_fns[] = {
    gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
    gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
};
TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqrdmlah_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const sqrdmlsh_fns[] = {
    gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
    gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
};
TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqrdmlsh_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const smlal_zzzw_fns[] = {
    NULL, gen_helper_sve2_smlal_zzzw_h,
    gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
};
TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlal_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlal_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const umlal_zzzw_fns[] = {
    NULL, gen_helper_sve2_umlal_zzzw_h,
    gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
};
TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlal_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlal_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = {
    NULL, gen_helper_sve2_smlsl_zzzw_h,
    gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
};
TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlsl_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlsl_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = {
    NULL, gen_helper_sve2_umlsl_zzzw_h,
    gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
};
TRANS_FEAT(UMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlsl_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlsl_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const cmla_fns[] = {
    gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
    gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
};
TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
           cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)

static gen_helper_gvec_4 * const cdot_fns[] = {
    NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d
};
TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
           cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)

static gen_helper_gvec_4 * const sqrdcmlah_fns[] = {
    gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
    gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
};
TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
           sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)

TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
           a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)

TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
                        gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)

TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
                        gen_helper_crypto_aese, a, false)
TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
                        gen_helper_crypto_aese, a, true)

TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
                        gen_helper_crypto_sm4e, a, 0)
TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
                        gen_helper_crypto_sm4ekey, a, 0)

TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
                        gen_gvec_rax1, a)

TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
           gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz,
           gen_helper_sve2_fcvtnt_ds, a, 0, FPST_FPCR)

TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz,
           gen_helper_sve_bfcvtnt, a, 0, FPST_FPCR)

TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz,
           gen_helper_sve2_fcvtlt_hs, a, 0, FPST_FPCR)
TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz,
           gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR)

TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a,
           FPROUNDING_ODD, gen_helper_sve_fcvt_ds)
TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a,
           FPROUNDING_ODD, gen_helper_sve2_fcvtnt_ds)

static gen_helper_gvec_3_ptr * const flogb_fns[] = {
    NULL, gen_helper_flogb_h,
    gen_helper_flogb_s, gen_helper_flogb_d
};
TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz],
           a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
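/*
 * For the widening FP multiply-add forms below, the helper's data word
 * packs the operand selectors: bit 0 is 'subtract' and bit 1 selects
 * top halves, so e.g. FMLSLT passes (1 << 1) | 1 == 3.  The indexed
 * forms additionally pack the element index starting at bit 2.
 */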
static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
{
    return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s,
                             a->rd, a->rn, a->rm, a->ra,
                             (sel << 1) | sub, cpu_env);
}

TRANS_FEAT(FMLALB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, false)
TRANS_FEAT(FMLALT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, true)
TRANS_FEAT(FMLSLB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, false)
TRANS_FEAT(FMLSLT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, true)

static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
{
    return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
                             a->rd, a->rn, a->rm, a->ra,
                             (a->index << 2) | (sel << 1) | sub, cpu_env);
}

TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false)
TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true)
TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false)
TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true)

TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
                        gen_helper_gvec_smmla_b, a, 0)
TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
                        gen_helper_gvec_usmmla_b, a, 0)
TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
                        gen_helper_gvec_ummla_b, a, 0)

TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
           gen_helper_gvec_bfdot, a, 0)
TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_bfdot_idx, a)

TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
                        gen_helper_gvec_bfmmla, a, 0)

static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
{
    return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal,
                              a->rd, a->rn, a->rm, a->ra, sel, FPST_FPCR);
}

TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false)
TRANS_FEAT(BFMLALT_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, true)

static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
{
    return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx,
                              a->rd, a->rn, a->rm, a->ra,
                              (a->index << 1) | sel, FPST_FPCR);
}

TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
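/*
 * PSEL below proceeds in four steps: reduce the index register plus
 * immediate modulo the number of elements (a mask when that count is
 * a power of two, otherwise an unsigned remainder); convert the
 * element number to a byte offset and bit position within Pm
 * (xor-adjusting the byte index on big-endian hosts); broadcast the
 * selected predicate bit to an all-ones or all-zeros 64-bit mask via
 * negation; and finally AND that mask into Pn to produce Pd.
 */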
static bool trans_PSEL(DisasContext *s, arg_psel *a)
{
    int vl = vec_full_reg_size(s);
    int pl = pred_gvec_reg_size(s);
    int elements = vl >> a->esz;
    TCGv_i64 tmp, didx, dbit;
    TCGv_ptr ptr;

    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    dbit = tcg_temp_new_i64();
    didx = tcg_temp_new_i64();
    ptr = tcg_temp_new_ptr();

    /* Compute the predicate element. */
    tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
    if (is_power_of_2(elements)) {
        tcg_gen_andi_i64(tmp, tmp, elements - 1);
    } else {
        tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements));
    }

    /* Extract the predicate byte and bit indices. */
    tcg_gen_shli_i64(tmp, tmp, a->esz);
    tcg_gen_andi_i64(dbit, tmp, 7);
    tcg_gen_shri_i64(didx, tmp, 3);
    if (HOST_BIG_ENDIAN) {
        tcg_gen_xori_i64(didx, didx, 7);
    }

    /* Load the predicate word. */
    tcg_gen_trunc_i64_ptr(ptr, didx);
    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));

    /* Extract the predicate bit and replicate to MO_64. */
    tcg_gen_shr_i64(tmp, tmp, dbit);
    tcg_gen_andi_i64(tmp, tmp, 1);
    tcg_gen_neg_i64(tmp, tmp);

    /* Apply to either copy the source, or write zeros. */
    tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
                      pred_full_reg_offset(s, a->pn), tmp, pl, pl);
    return true;
}

static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
{
    tcg_gen_smax_i32(d, a, n);
    tcg_gen_smin_i32(d, d, m);
}

static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
{
    tcg_gen_smax_i64(d, a, n);
    tcg_gen_smin_i64(d, d, m);
}

static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                           TCGv_vec m, TCGv_vec a)
{
    tcg_gen_smax_vec(vece, d, a, n);
    tcg_gen_smin_vec(vece, d, d, m);
}

static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop[] = {
        INDEX_op_smin_vec, INDEX_op_smax_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_sclamp_i32,
          .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_sclamp_i64,
          .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_d,
          .opt_opc = vecop,
          .vece = MO_64,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
}

TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)

static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
{
    tcg_gen_umax_i32(d, a, n);
    tcg_gen_umin_i32(d, d, m);
}

static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
{
    tcg_gen_umax_i64(d, a, n);
    tcg_gen_umin_i64(d, d, m);
}

static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                           TCGv_vec m, TCGv_vec a)
{
    tcg_gen_umax_vec(vece, d, a, n);
    tcg_gen_umin_vec(vece, d, d, m);
}

static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop[] = {
        INDEX_op_umin_vec, INDEX_op_umax_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_uclamp_i32,
          .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_uclamp_i64,
          .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_d,
          .opt_opc = vecop,
          .vece = MO_64,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
}

TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)