1 /* 2 * AArch64 SVE translation 3 * 4 * Copyright (c) 2018 Linaro, Ltd 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2.1 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "translate.h" 22 #include "translate-a64.h" 23 #include "fpu/softfloat.h" 24 25 26 typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, 27 TCGv_i64, uint32_t, uint32_t); 28 29 typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr, 30 TCGv_ptr, TCGv_i32); 31 typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr, 32 TCGv_ptr, TCGv_ptr, TCGv_i32); 33 34 typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32); 35 typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr, 36 TCGv_ptr, TCGv_i64, TCGv_i32); 37 38 /* 39 * Helpers for extracting complex instruction fields. 40 */ 41 42 /* See e.g. ASR (immediate, predicated). 43 * Returns -1 for unallocated encoding; diagnose later. 44 */ 45 static int tszimm_esz(DisasContext *s, int x) 46 { 47 x >>= 3; /* discard imm3 */ 48 return 31 - clz32(x); 49 } 50 51 static int tszimm_shr(DisasContext *s, int x) 52 { 53 /* 54 * We won't use the tszimm_shr() value if tszimm_esz() returns -1 (the 55 * trans function will check for esz < 0), so we can return any 56 * value we like from here in that case as long as we avoid UB. 57 */ 58 int esz = tszimm_esz(s, x); 59 if (esz < 0) { 60 return esz; 61 } 62 return (16 << esz) - x; 63 } 64 65 /* See e.g. LSL (immediate, predicated). */ 66 static int tszimm_shl(DisasContext *s, int x) 67 { 68 /* As with tszimm_shr(), value will be unused if esz < 0 */ 69 int esz = tszimm_esz(s, x); 70 if (esz < 0) { 71 return esz; 72 } 73 return x - (8 << esz); 74 } 75 76 /* The SH bit is in bit 8. Extract the low 8 and shift. */ 77 static inline int expand_imm_sh8s(DisasContext *s, int x) 78 { 79 return (int8_t)x << (x & 0x100 ? 8 : 0); 80 } 81 82 static inline int expand_imm_sh8u(DisasContext *s, int x) 83 { 84 return (uint8_t)x << (x & 0x100 ? 8 : 0); 85 } 86 87 /* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype) 88 * with unsigned data. C.f. SVE Memory Contiguous Load Group. 89 */ 90 static inline int msz_dtype(DisasContext *s, int msz) 91 { 92 static const uint8_t dtype[4] = { 0, 5, 10, 15 }; 93 return dtype[msz]; 94 } 95 96 /* 97 * Include the generated decoder. 98 */ 99 100 #include "decode-sve.c.inc" 101 102 /* 103 * Implement all of the translator functions referenced by the decoder. 104 */ 105 106 /* Invoke an out-of-line helper on 2 Zregs. 
*/ 107 static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn, 108 int rd, int rn, int data) 109 { 110 if (fn == NULL) { 111 return false; 112 } 113 if (sve_access_check(s)) { 114 unsigned vsz = vec_full_reg_size(s); 115 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd), 116 vec_full_reg_offset(s, rn), 117 vsz, vsz, data, fn); 118 } 119 return true; 120 } 121 122 static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, 123 int rd, int rn, int data, 124 ARMFPStatusFlavour flavour) 125 { 126 if (fn == NULL) { 127 return false; 128 } 129 if (sve_access_check(s)) { 130 unsigned vsz = vec_full_reg_size(s); 131 TCGv_ptr status = fpstatus_ptr(flavour); 132 133 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), 134 vec_full_reg_offset(s, rn), 135 status, vsz, vsz, data, fn); 136 } 137 return true; 138 } 139 140 static bool gen_gvec_fpst_ah_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn, 141 arg_rr_esz *a, int data) 142 { 143 return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data, 144 select_ah_fpst(s, a->esz)); 145 } 146 147 /* Invoke an out-of-line helper on 3 Zregs. */ 148 static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn, 149 int rd, int rn, int rm, int data) 150 { 151 if (fn == NULL) { 152 return false; 153 } 154 if (sve_access_check(s)) { 155 unsigned vsz = vec_full_reg_size(s); 156 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), 157 vec_full_reg_offset(s, rn), 158 vec_full_reg_offset(s, rm), 159 vsz, vsz, data, fn); 160 } 161 return true; 162 } 163 164 static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn, 165 arg_rrr_esz *a, int data) 166 { 167 return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data); 168 } 169 170 /* Invoke an out-of-line helper on 3 Zregs, plus float_status. */ 171 static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, 172 int rd, int rn, int rm, 173 int data, ARMFPStatusFlavour flavour) 174 { 175 if (fn == NULL) { 176 return false; 177 } 178 if (sve_access_check(s)) { 179 unsigned vsz = vec_full_reg_size(s); 180 TCGv_ptr status = fpstatus_ptr(flavour); 181 182 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), 183 vec_full_reg_offset(s, rn), 184 vec_full_reg_offset(s, rm), 185 status, vsz, vsz, data, fn); 186 } 187 return true; 188 } 189 190 static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, 191 arg_rrr_esz *a, int data) 192 { 193 return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, 194 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 195 } 196 197 static bool gen_gvec_fpst_ah_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, 198 arg_rrr_esz *a, int data) 199 { 200 return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, 201 select_ah_fpst(s, a->esz)); 202 } 203 204 /* Invoke an out-of-line helper on 4 Zregs. 
*/ 205 static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, 206 int rd, int rn, int rm, int ra, int data) 207 { 208 if (fn == NULL) { 209 return false; 210 } 211 if (sve_access_check(s)) { 212 unsigned vsz = vec_full_reg_size(s); 213 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), 214 vec_full_reg_offset(s, rn), 215 vec_full_reg_offset(s, rm), 216 vec_full_reg_offset(s, ra), 217 vsz, vsz, data, fn); 218 } 219 return true; 220 } 221 222 static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn, 223 arg_rrrr_esz *a, int data) 224 { 225 return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); 226 } 227 228 static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn, 229 arg_rrxr_esz *a) 230 { 231 return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); 232 } 233 234 /* Invoke an out-of-line helper on 4 Zregs, plus a pointer. */ 235 static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 236 int rd, int rn, int rm, int ra, 237 int data, TCGv_ptr ptr) 238 { 239 if (fn == NULL) { 240 return false; 241 } 242 if (sve_access_check(s)) { 243 unsigned vsz = vec_full_reg_size(s); 244 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), 245 vec_full_reg_offset(s, rn), 246 vec_full_reg_offset(s, rm), 247 vec_full_reg_offset(s, ra), 248 ptr, vsz, vsz, data, fn); 249 } 250 return true; 251 } 252 253 static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 254 int rd, int rn, int rm, int ra, 255 int data, ARMFPStatusFlavour flavour) 256 { 257 TCGv_ptr status = fpstatus_ptr(flavour); 258 bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status); 259 return ret; 260 } 261 262 static bool gen_gvec_env_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 263 int rd, int rn, int rm, int ra, 264 int data) 265 { 266 return gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, tcg_env); 267 } 268 269 static bool gen_gvec_env_arg_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 270 arg_rrrr_esz *a, int data) 271 { 272 return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data); 273 } 274 275 static bool gen_gvec_env_arg_zzxz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 276 arg_rrxr_esz *a) 277 { 278 return gen_gvec_env_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index); 279 } 280 281 /* Invoke an out-of-line helper on 4 Zregs, 1 Preg, plus fpst. */ 282 static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn, 283 int rd, int rn, int rm, int ra, int pg, 284 int data, ARMFPStatusFlavour flavour) 285 { 286 if (fn == NULL) { 287 return false; 288 } 289 if (sve_access_check(s)) { 290 unsigned vsz = vec_full_reg_size(s); 291 TCGv_ptr status = fpstatus_ptr(flavour); 292 293 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, rd), 294 vec_full_reg_offset(s, rn), 295 vec_full_reg_offset(s, rm), 296 vec_full_reg_offset(s, ra), 297 pred_full_reg_offset(s, pg), 298 status, vsz, vsz, data, fn); 299 } 300 return true; 301 } 302 303 /* Invoke an out-of-line helper on 2 Zregs and a predicate. 
*/ 304 static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn, 305 int rd, int rn, int pg, int data) 306 { 307 if (fn == NULL) { 308 return false; 309 } 310 if (sve_access_check(s)) { 311 unsigned vsz = vec_full_reg_size(s); 312 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), 313 vec_full_reg_offset(s, rn), 314 pred_full_reg_offset(s, pg), 315 vsz, vsz, data, fn); 316 } 317 return true; 318 } 319 320 static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn, 321 arg_rpr_esz *a, int data) 322 { 323 return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data); 324 } 325 326 static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn, 327 arg_rpri_esz *a) 328 { 329 return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm); 330 } 331 332 static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn, 333 int rd, int rn, int pg, int data, 334 ARMFPStatusFlavour flavour) 335 { 336 if (fn == NULL) { 337 return false; 338 } 339 if (sve_access_check(s)) { 340 unsigned vsz = vec_full_reg_size(s); 341 TCGv_ptr status = fpstatus_ptr(flavour); 342 343 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), 344 vec_full_reg_offset(s, rn), 345 pred_full_reg_offset(s, pg), 346 status, vsz, vsz, data, fn); 347 } 348 return true; 349 } 350 351 static bool gen_gvec_fpst_arg_zpz(DisasContext *s, gen_helper_gvec_3_ptr *fn, 352 arg_rpr_esz *a, int data, 353 ARMFPStatusFlavour flavour) 354 { 355 return gen_gvec_fpst_zzp(s, fn, a->rd, a->rn, a->pg, data, flavour); 356 } 357 358 /* Invoke an out-of-line helper on 3 Zregs and a predicate. */ 359 static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn, 360 int rd, int rn, int rm, int pg, int data) 361 { 362 if (fn == NULL) { 363 return false; 364 } 365 if (sve_access_check(s)) { 366 unsigned vsz = vec_full_reg_size(s); 367 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), 368 vec_full_reg_offset(s, rn), 369 vec_full_reg_offset(s, rm), 370 pred_full_reg_offset(s, pg), 371 vsz, vsz, data, fn); 372 } 373 return true; 374 } 375 376 static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn, 377 arg_rprr_esz *a, int data) 378 { 379 return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data); 380 } 381 382 /* Invoke an out-of-line helper on 3 Zregs and a predicate. */ 383 static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn, 384 int rd, int rn, int rm, int pg, int data, 385 ARMFPStatusFlavour flavour) 386 { 387 if (fn == NULL) { 388 return false; 389 } 390 if (sve_access_check(s)) { 391 unsigned vsz = vec_full_reg_size(s); 392 TCGv_ptr status = fpstatus_ptr(flavour); 393 394 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), 395 vec_full_reg_offset(s, rn), 396 vec_full_reg_offset(s, rm), 397 pred_full_reg_offset(s, pg), 398 status, vsz, vsz, data, fn); 399 } 400 return true; 401 } 402 403 static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, 404 arg_rprr_esz *a) 405 { 406 return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0, 407 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 408 } 409 410 /* Invoke a vector expander on two Zregs and an immediate. 
*/ 411 static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn, 412 int esz, int rd, int rn, uint64_t imm) 413 { 414 if (gvec_fn == NULL) { 415 return false; 416 } 417 if (sve_access_check(s)) { 418 unsigned vsz = vec_full_reg_size(s); 419 gvec_fn(esz, vec_full_reg_offset(s, rd), 420 vec_full_reg_offset(s, rn), imm, vsz, vsz); 421 } 422 return true; 423 } 424 425 static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn, 426 arg_rri_esz *a) 427 { 428 if (a->esz < 0) { 429 /* Invalid tsz encoding -- see tszimm_esz. */ 430 return false; 431 } 432 return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm); 433 } 434 435 /* Invoke a vector expander on three Zregs. */ 436 static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn, 437 int esz, int rd, int rn, int rm) 438 { 439 if (gvec_fn == NULL) { 440 return false; 441 } 442 if (sve_access_check(s)) { 443 unsigned vsz = vec_full_reg_size(s); 444 gvec_fn(esz, vec_full_reg_offset(s, rd), 445 vec_full_reg_offset(s, rn), 446 vec_full_reg_offset(s, rm), vsz, vsz); 447 } 448 return true; 449 } 450 451 static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn, 452 arg_rrr_esz *a) 453 { 454 return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm); 455 } 456 457 /* Invoke a vector expander on four Zregs. */ 458 static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn, 459 arg_rrrr_esz *a) 460 { 461 if (gvec_fn == NULL) { 462 return false; 463 } 464 if (sve_access_check(s)) { 465 unsigned vsz = vec_full_reg_size(s); 466 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), 467 vec_full_reg_offset(s, a->rn), 468 vec_full_reg_offset(s, a->rm), 469 vec_full_reg_offset(s, a->ra), vsz, vsz); 470 } 471 return true; 472 } 473 474 /* Invoke a vector move on two Zregs. */ 475 static bool do_mov_z(DisasContext *s, int rd, int rn) 476 { 477 if (sve_access_check(s)) { 478 unsigned vsz = vec_full_reg_size(s); 479 tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd), 480 vec_full_reg_offset(s, rn), vsz, vsz); 481 } 482 return true; 483 } 484 485 /* Initialize a Zreg with replications of a 64-bit immediate. */ 486 static void do_dupi_z(DisasContext *s, int rd, uint64_t word) 487 { 488 unsigned vsz = vec_full_reg_size(s); 489 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word); 490 } 491 492 /* Invoke a vector expander on three Pregs. */ 493 static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn, 494 int rd, int rn, int rm) 495 { 496 if (sve_access_check(s)) { 497 unsigned psz = pred_gvec_reg_size(s); 498 gvec_fn(MO_64, pred_full_reg_offset(s, rd), 499 pred_full_reg_offset(s, rn), 500 pred_full_reg_offset(s, rm), psz, psz); 501 } 502 return true; 503 } 504 505 /* Invoke a vector move on two Pregs. */ 506 static bool do_mov_p(DisasContext *s, int rd, int rn) 507 { 508 if (sve_access_check(s)) { 509 unsigned psz = pred_gvec_reg_size(s); 510 tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd), 511 pred_full_reg_offset(s, rn), psz, psz); 512 } 513 return true; 514 } 515 516 /* Set the cpu flags as per a return from an SVE helper. */ 517 static void do_pred_flags(TCGv_i32 t) 518 { 519 tcg_gen_mov_i32(cpu_NF, t); 520 tcg_gen_andi_i32(cpu_ZF, t, 2); 521 tcg_gen_andi_i32(cpu_CF, t, 1); 522 tcg_gen_movi_i32(cpu_VF, 0); 523 } 524 525 /* Subroutines computing the ARM PredTest psuedofunction. 
*/ 526 static void do_predtest1(TCGv_i64 d, TCGv_i64 g) 527 { 528 TCGv_i32 t = tcg_temp_new_i32(); 529 530 gen_helper_sve_predtest1(t, d, g); 531 do_pred_flags(t); 532 } 533 534 static void do_predtest(DisasContext *s, int dofs, int gofs, int words) 535 { 536 TCGv_ptr dptr = tcg_temp_new_ptr(); 537 TCGv_ptr gptr = tcg_temp_new_ptr(); 538 TCGv_i32 t = tcg_temp_new_i32(); 539 540 tcg_gen_addi_ptr(dptr, tcg_env, dofs); 541 tcg_gen_addi_ptr(gptr, tcg_env, gofs); 542 543 gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words)); 544 545 do_pred_flags(t); 546 } 547 548 /* For each element size, the bits within a predicate word that are active. */ 549 const uint64_t pred_esz_masks[5] = { 550 0xffffffffffffffffull, 0x5555555555555555ull, 551 0x1111111111111111ull, 0x0101010101010101ull, 552 0x0001000100010001ull, 553 }; 554 555 static bool trans_INVALID(DisasContext *s, arg_INVALID *a) 556 { 557 unallocated_encoding(s); 558 return true; 559 } 560 561 /* 562 *** SVE Logical - Unpredicated Group 563 */ 564 565 TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a) 566 TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a) 567 TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a) 568 TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a) 569 570 static bool trans_XAR(DisasContext *s, arg_rrri_esz *a) 571 { 572 if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) { 573 return false; 574 } 575 if (sve_access_check(s)) { 576 unsigned vsz = vec_full_reg_size(s); 577 gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd), 578 vec_full_reg_offset(s, a->rn), 579 vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz); 580 } 581 return true; 582 } 583 584 TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_gvec_eor3, a) 585 TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_gvec_bcax, a) 586 587 static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 588 uint32_t a, uint32_t oprsz, uint32_t maxsz) 589 { 590 /* BSL differs from the generic bitsel in argument ordering. 
*/ 591 tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz); 592 } 593 594 TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a) 595 596 static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) 597 { 598 tcg_gen_andc_i64(n, k, n); 599 tcg_gen_andc_i64(m, m, k); 600 tcg_gen_or_i64(d, n, m); 601 } 602 603 static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 604 TCGv_vec m, TCGv_vec k) 605 { 606 tcg_gen_not_vec(vece, n, n); 607 tcg_gen_bitsel_vec(vece, d, k, n, m); 608 } 609 610 static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 611 uint32_t a, uint32_t oprsz, uint32_t maxsz) 612 { 613 static const GVecGen4 op = { 614 .fni8 = gen_bsl1n_i64, 615 .fniv = gen_bsl1n_vec, 616 .fno = gen_helper_sve2_bsl1n, 617 .vece = MO_64, 618 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 619 }; 620 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); 621 } 622 623 TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a) 624 625 static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) 626 { 627 /* 628 * Z[dn] = (n & k) | (~m & ~k) 629 * = | ~(m | k) 630 */ 631 tcg_gen_and_i64(n, n, k); 632 if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) { 633 tcg_gen_or_i64(m, m, k); 634 tcg_gen_orc_i64(d, n, m); 635 } else { 636 tcg_gen_nor_i64(m, m, k); 637 tcg_gen_or_i64(d, n, m); 638 } 639 } 640 641 static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 642 TCGv_vec m, TCGv_vec k) 643 { 644 tcg_gen_not_vec(vece, m, m); 645 tcg_gen_bitsel_vec(vece, d, k, n, m); 646 } 647 648 static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 649 uint32_t a, uint32_t oprsz, uint32_t maxsz) 650 { 651 static const GVecGen4 op = { 652 .fni8 = gen_bsl2n_i64, 653 .fniv = gen_bsl2n_vec, 654 .fno = gen_helper_sve2_bsl2n, 655 .vece = MO_64, 656 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 657 }; 658 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); 659 } 660 661 TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a) 662 663 static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k) 664 { 665 tcg_gen_and_i64(n, n, k); 666 tcg_gen_andc_i64(m, m, k); 667 tcg_gen_nor_i64(d, n, m); 668 } 669 670 static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n, 671 TCGv_vec m, TCGv_vec k) 672 { 673 tcg_gen_bitsel_vec(vece, d, k, n, m); 674 tcg_gen_not_vec(vece, d, d); 675 } 676 677 static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m, 678 uint32_t a, uint32_t oprsz, uint32_t maxsz) 679 { 680 static const GVecGen4 op = { 681 .fni8 = gen_nbsl_i64, 682 .fniv = gen_nbsl_vec, 683 .fno = gen_helper_sve2_nbsl, 684 .vece = MO_64, 685 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 686 }; 687 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op); 688 } 689 690 TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a) 691 692 /* 693 *** SVE Integer Arithmetic - Unpredicated Group 694 */ 695 696 TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a) 697 TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a) 698 TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a) 699 TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a) 700 TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a) 701 TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a) 702 703 /* 704 *** SVE Integer Arithmetic - Binary Predicated Group 705 */ 706 707 /* Select active elememnts from Zn and inactive elements from Zm, 708 * storing the result in Zd. 
709 */ 710 static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz) 711 { 712 static gen_helper_gvec_4 * const fns[4] = { 713 gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, 714 gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d 715 }; 716 return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0); 717 } 718 719 #define DO_ZPZZ(NAME, FEAT, name) \ 720 static gen_helper_gvec_4 * const name##_zpzz_fns[4] = { \ 721 gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h, \ 722 gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d, \ 723 }; \ 724 TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz, \ 725 name##_zpzz_fns[a->esz], a, 0) 726 727 DO_ZPZZ(AND_zpzz, aa64_sve, sve_and) 728 DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor) 729 DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr) 730 DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic) 731 732 DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add) 733 DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub) 734 735 DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax) 736 DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax) 737 DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin) 738 DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin) 739 DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd) 740 DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd) 741 742 DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul) 743 DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh) 744 DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh) 745 746 DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr) 747 DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr) 748 DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl) 749 750 static gen_helper_gvec_4 * const sdiv_fns[4] = { 751 NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d 752 }; 753 TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0) 754 755 static gen_helper_gvec_4 * const udiv_fns[4] = { 756 NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d 757 }; 758 TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0) 759 760 TRANS_FEAT(SEL_zpzz, aa64_sve, do_sel_z, a->rd, a->rn, a->rm, a->pg, a->esz) 761 762 /* 763 *** SVE Integer Arithmetic - Unary Predicated Group 764 */ 765 766 #define DO_ZPZ(NAME, FEAT, name) \ 767 static gen_helper_gvec_3 * const name##_fns[4] = { \ 768 gen_helper_##name##_b, gen_helper_##name##_h, \ 769 gen_helper_##name##_s, gen_helper_##name##_d, \ 770 }; \ 771 TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0) 772 773 DO_ZPZ(CLS, aa64_sve, sve_cls) 774 DO_ZPZ(CLZ, aa64_sve, sve_clz) 775 DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz) 776 DO_ZPZ(CNOT, aa64_sve, sve_cnot) 777 DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz) 778 DO_ZPZ(ABS, aa64_sve, sve_abs) 779 DO_ZPZ(NEG, aa64_sve, sve_neg) 780 DO_ZPZ(RBIT, aa64_sve, sve_rbit) 781 782 static gen_helper_gvec_3 * const fabs_fns[4] = { 783 NULL, gen_helper_sve_fabs_h, 784 gen_helper_sve_fabs_s, gen_helper_sve_fabs_d, 785 }; 786 static gen_helper_gvec_3 * const fabs_ah_fns[4] = { 787 NULL, gen_helper_sve_ah_fabs_h, 788 gen_helper_sve_ah_fabs_s, gen_helper_sve_ah_fabs_d, 789 }; 790 TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, 791 s->fpcr_ah ? fabs_ah_fns[a->esz] : fabs_fns[a->esz], a, 0) 792 793 static gen_helper_gvec_3 * const fneg_fns[4] = { 794 NULL, gen_helper_sve_fneg_h, 795 gen_helper_sve_fneg_s, gen_helper_sve_fneg_d, 796 }; 797 static gen_helper_gvec_3 * const fneg_ah_fns[4] = { 798 NULL, gen_helper_sve_ah_fneg_h, 799 gen_helper_sve_ah_fneg_s, gen_helper_sve_ah_fneg_d, 800 }; 801 TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, 802 s->fpcr_ah ? 
fneg_ah_fns[a->esz] : fneg_fns[a->esz], a, 0) 803 804 static gen_helper_gvec_3 * const sxtb_fns[4] = { 805 NULL, gen_helper_sve_sxtb_h, 806 gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d, 807 }; 808 TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0) 809 810 static gen_helper_gvec_3 * const uxtb_fns[4] = { 811 NULL, gen_helper_sve_uxtb_h, 812 gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d, 813 }; 814 TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0) 815 816 static gen_helper_gvec_3 * const sxth_fns[4] = { 817 NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d 818 }; 819 TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0) 820 821 static gen_helper_gvec_3 * const uxth_fns[4] = { 822 NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d 823 }; 824 TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, uxth_fns[a->esz], a, 0) 825 826 TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz, 827 a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0) 828 TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz, 829 a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0) 830 831 /* 832 *** SVE Integer Reduction Group 833 */ 834 835 typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32); 836 static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a, 837 gen_helper_gvec_reduc *fn) 838 { 839 unsigned vsz = vec_full_reg_size(s); 840 TCGv_ptr t_zn, t_pg; 841 TCGv_i32 desc; 842 TCGv_i64 temp; 843 844 if (fn == NULL) { 845 return false; 846 } 847 if (!sve_access_check(s)) { 848 return true; 849 } 850 851 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 852 temp = tcg_temp_new_i64(); 853 t_zn = tcg_temp_new_ptr(); 854 t_pg = tcg_temp_new_ptr(); 855 856 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn)); 857 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); 858 fn(temp, t_zn, t_pg, desc); 859 860 write_fp_dreg(s, a->rd, temp); 861 return true; 862 } 863 864 #define DO_VPZ(NAME, name) \ 865 static gen_helper_gvec_reduc * const name##_fns[4] = { \ 866 gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \ 867 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 868 }; \ 869 TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz]) 870 871 DO_VPZ(ORV, orv) 872 DO_VPZ(ANDV, andv) 873 DO_VPZ(EORV, eorv) 874 875 DO_VPZ(UADDV, uaddv) 876 DO_VPZ(SMAXV, smaxv) 877 DO_VPZ(UMAXV, umaxv) 878 DO_VPZ(SMINV, sminv) 879 DO_VPZ(UMINV, uminv) 880 881 static gen_helper_gvec_reduc * const saddv_fns[4] = { 882 gen_helper_sve_saddv_b, gen_helper_sve_saddv_h, 883 gen_helper_sve_saddv_s, NULL 884 }; 885 TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz]) 886 887 #undef DO_VPZ 888 889 /* 890 *** SVE Shift by Immediate - Predicated Group 891 */ 892 893 /* 894 * Copy Zn into Zd, storing zeros into inactive elements. 895 * If invert, store zeros into the active elements. 896 */ 897 static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg, 898 int esz, bool invert) 899 { 900 static gen_helper_gvec_3 * const fns[4] = { 901 gen_helper_sve_movz_b, gen_helper_sve_movz_h, 902 gen_helper_sve_movz_s, gen_helper_sve_movz_d, 903 }; 904 return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert); 905 } 906 907 static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr, 908 gen_helper_gvec_3 * const fns[4]) 909 { 910 int max; 911 912 if (a->esz < 0) { 913 /* Invalid tsz encoding -- see tszimm_esz. */ 914 return false; 915 } 916 917 /* 918 * Shift by element size is architecturally valid. 
919 * For arithmetic right-shift, it's the same as by one less. 920 * For logical shifts and ASRD, it is a zeroing operation. 921 */ 922 max = 8 << a->esz; 923 if (a->imm >= max) { 924 if (asr) { 925 a->imm = max - 1; 926 } else { 927 return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true); 928 } 929 } 930 return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a); 931 } 932 933 static gen_helper_gvec_3 * const asr_zpzi_fns[4] = { 934 gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h, 935 gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d, 936 }; 937 TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns) 938 939 static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = { 940 gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h, 941 gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d, 942 }; 943 TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns) 944 945 static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = { 946 gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h, 947 gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d, 948 }; 949 TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns) 950 951 static gen_helper_gvec_3 * const asrd_fns[4] = { 952 gen_helper_sve_asrd_b, gen_helper_sve_asrd_h, 953 gen_helper_sve_asrd_s, gen_helper_sve_asrd_d, 954 }; 955 TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns) 956 957 static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = { 958 gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h, 959 gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d, 960 }; 961 TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi, 962 a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a) 963 964 static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = { 965 gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h, 966 gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d, 967 }; 968 TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi, 969 a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a) 970 971 static gen_helper_gvec_3 * const srshr_fns[4] = { 972 gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h, 973 gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d, 974 }; 975 TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi, 976 a->esz < 0 ? NULL : srshr_fns[a->esz], a) 977 978 static gen_helper_gvec_3 * const urshr_fns[4] = { 979 gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h, 980 gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d, 981 }; 982 TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi, 983 a->esz < 0 ? NULL : urshr_fns[a->esz], a) 984 985 static gen_helper_gvec_3 * const sqshlu_fns[4] = { 986 gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h, 987 gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d, 988 }; 989 TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi, 990 a->esz < 0 ? NULL : sqshlu_fns[a->esz], a) 991 992 /* 993 *** SVE Bitwise Shift - Predicated Group 994 */ 995 996 #define DO_ZPZW(NAME, name) \ 997 static gen_helper_gvec_4 * const name##_zpzw_fns[4] = { \ 998 gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \ 999 gen_helper_sve_##name##_zpzw_s, NULL \ 1000 }; \ 1001 TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz, \ 1002 a->esz < 0 ? 
NULL : name##_zpzw_fns[a->esz], a, 0) 1003 1004 DO_ZPZW(ASR, asr) 1005 DO_ZPZW(LSR, lsr) 1006 DO_ZPZW(LSL, lsl) 1007 1008 #undef DO_ZPZW 1009 1010 /* 1011 *** SVE Bitwise Shift - Unpredicated Group 1012 */ 1013 1014 static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr, 1015 void (*gvec_fn)(unsigned, uint32_t, uint32_t, 1016 int64_t, uint32_t, uint32_t)) 1017 { 1018 if (a->esz < 0) { 1019 /* Invalid tsz encoding -- see tszimm_esz. */ 1020 return false; 1021 } 1022 if (sve_access_check(s)) { 1023 unsigned vsz = vec_full_reg_size(s); 1024 /* Shift by element size is architecturally valid. For 1025 arithmetic right-shift, it's the same as by one less. 1026 Otherwise it is a zeroing operation. */ 1027 if (a->imm >= 8 << a->esz) { 1028 if (asr) { 1029 a->imm = (8 << a->esz) - 1; 1030 } else { 1031 do_dupi_z(s, a->rd, 0); 1032 return true; 1033 } 1034 } 1035 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), 1036 vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz); 1037 } 1038 return true; 1039 } 1040 1041 TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari) 1042 TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri) 1043 TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli) 1044 1045 #define DO_ZZW(NAME, name) \ 1046 static gen_helper_gvec_3 * const name##_zzw_fns[4] = { \ 1047 gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \ 1048 gen_helper_sve_##name##_zzw_s, NULL \ 1049 }; \ 1050 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz, \ 1051 name##_zzw_fns[a->esz], a, 0) 1052 1053 DO_ZZW(ASR_zzw, asr) 1054 DO_ZZW(LSR_zzw, lsr) 1055 DO_ZZW(LSL_zzw, lsl) 1056 1057 #undef DO_ZZW 1058 1059 /* 1060 *** SVE Integer Multiply-Add Group 1061 */ 1062 1063 static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a, 1064 gen_helper_gvec_5 *fn) 1065 { 1066 if (sve_access_check(s)) { 1067 unsigned vsz = vec_full_reg_size(s); 1068 tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd), 1069 vec_full_reg_offset(s, a->ra), 1070 vec_full_reg_offset(s, a->rn), 1071 vec_full_reg_offset(s, a->rm), 1072 pred_full_reg_offset(s, a->pg), 1073 vsz, vsz, 0, fn); 1074 } 1075 return true; 1076 } 1077 1078 static gen_helper_gvec_5 * const mla_fns[4] = { 1079 gen_helper_sve_mla_b, gen_helper_sve_mla_h, 1080 gen_helper_sve_mla_s, gen_helper_sve_mla_d, 1081 }; 1082 TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz]) 1083 1084 static gen_helper_gvec_5 * const mls_fns[4] = { 1085 gen_helper_sve_mls_b, gen_helper_sve_mls_h, 1086 gen_helper_sve_mls_s, gen_helper_sve_mls_d, 1087 }; 1088 TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz]) 1089 1090 /* 1091 *** SVE Index Generation Group 1092 */ 1093 1094 static bool do_index(DisasContext *s, int esz, int rd, 1095 TCGv_i64 start, TCGv_i64 incr) 1096 { 1097 unsigned vsz; 1098 TCGv_i32 desc; 1099 TCGv_ptr t_zd; 1100 1101 if (!sve_access_check(s)) { 1102 return true; 1103 } 1104 1105 vsz = vec_full_reg_size(s); 1106 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 1107 t_zd = tcg_temp_new_ptr(); 1108 1109 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd)); 1110 if (esz == 3) { 1111 gen_helper_sve_index_d(t_zd, start, incr, desc); 1112 } else { 1113 typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); 1114 static index_fn * const fns[3] = { 1115 gen_helper_sve_index_b, 1116 gen_helper_sve_index_h, 1117 gen_helper_sve_index_s, 1118 }; 1119 TCGv_i32 s32 = tcg_temp_new_i32(); 1120 TCGv_i32 i32 = tcg_temp_new_i32(); 1121 1122 tcg_gen_extrl_i64_i32(s32, start); 1123 tcg_gen_extrl_i64_i32(i32, 
incr); 1124 fns[esz](t_zd, s32, i32, desc); 1125 } 1126 return true; 1127 } 1128 1129 TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd, 1130 tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2)) 1131 TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd, 1132 tcg_constant_i64(a->imm), cpu_reg(s, a->rm)) 1133 TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd, 1134 cpu_reg(s, a->rn), tcg_constant_i64(a->imm)) 1135 TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd, 1136 cpu_reg(s, a->rn), cpu_reg(s, a->rm)) 1137 1138 /* 1139 *** SVE Stack Allocation Group 1140 */ 1141 1142 static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) 1143 { 1144 if (!dc_isar_feature(aa64_sve, s)) { 1145 return false; 1146 } 1147 if (sve_access_check(s)) { 1148 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1149 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1150 tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s)); 1151 } 1152 return true; 1153 } 1154 1155 static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a) 1156 { 1157 if (!dc_isar_feature(aa64_sme, s)) { 1158 return false; 1159 } 1160 if (sme_enabled_check(s)) { 1161 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1162 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1163 tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s)); 1164 } 1165 return true; 1166 } 1167 1168 static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) 1169 { 1170 if (!dc_isar_feature(aa64_sve, s)) { 1171 return false; 1172 } 1173 if (sve_access_check(s)) { 1174 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1175 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1176 tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s)); 1177 } 1178 return true; 1179 } 1180 1181 static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a) 1182 { 1183 if (!dc_isar_feature(aa64_sme, s)) { 1184 return false; 1185 } 1186 if (sme_enabled_check(s)) { 1187 TCGv_i64 rd = cpu_reg_sp(s, a->rd); 1188 TCGv_i64 rn = cpu_reg_sp(s, a->rn); 1189 tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s)); 1190 } 1191 return true; 1192 } 1193 1194 static bool trans_RDVL(DisasContext *s, arg_RDVL *a) 1195 { 1196 if (!dc_isar_feature(aa64_sve, s)) { 1197 return false; 1198 } 1199 if (sve_access_check(s)) { 1200 TCGv_i64 reg = cpu_reg(s, a->rd); 1201 tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s)); 1202 } 1203 return true; 1204 } 1205 1206 static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a) 1207 { 1208 if (!dc_isar_feature(aa64_sme, s)) { 1209 return false; 1210 } 1211 if (sme_enabled_check(s)) { 1212 TCGv_i64 reg = cpu_reg(s, a->rd); 1213 tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s)); 1214 } 1215 return true; 1216 } 1217 1218 /* 1219 *** SVE Compute Vector Address Group 1220 */ 1221 1222 static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) 1223 { 1224 return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm); 1225 } 1226 1227 TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) 1228 TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) 1229 TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) 1230 TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) 1231 1232 /* 1233 *** SVE Integer Misc - Unpredicated Group 1234 */ 1235 1236 static gen_helper_gvec_2 * const fexpa_fns[4] = { 1237 NULL, gen_helper_sve_fexpa_h, 1238 gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, 1239 }; 1240 TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, 1241 fexpa_fns[a->esz], a->rd, a->rn, s->fpcr_ah) 1242 1243 static gen_helper_gvec_3 * const 
ftssel_fns[4] = { 1244 NULL, gen_helper_sve_ftssel_h, 1245 gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, 1246 }; 1247 TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, 1248 ftssel_fns[a->esz], a, s->fpcr_ah) 1249 1250 /* 1251 *** SVE Predicate Logical Operations Group 1252 */ 1253 1254 static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a, 1255 const GVecGen4 *gvec_op) 1256 { 1257 if (!sve_access_check(s)) { 1258 return true; 1259 } 1260 1261 unsigned psz = pred_gvec_reg_size(s); 1262 int dofs = pred_full_reg_offset(s, a->rd); 1263 int nofs = pred_full_reg_offset(s, a->rn); 1264 int mofs = pred_full_reg_offset(s, a->rm); 1265 int gofs = pred_full_reg_offset(s, a->pg); 1266 1267 if (!a->s) { 1268 tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op); 1269 return true; 1270 } 1271 1272 if (psz == 8) { 1273 /* Do the operation and the flags generation in temps. */ 1274 TCGv_i64 pd = tcg_temp_new_i64(); 1275 TCGv_i64 pn = tcg_temp_new_i64(); 1276 TCGv_i64 pm = tcg_temp_new_i64(); 1277 TCGv_i64 pg = tcg_temp_new_i64(); 1278 1279 tcg_gen_ld_i64(pn, tcg_env, nofs); 1280 tcg_gen_ld_i64(pm, tcg_env, mofs); 1281 tcg_gen_ld_i64(pg, tcg_env, gofs); 1282 1283 gvec_op->fni8(pd, pn, pm, pg); 1284 tcg_gen_st_i64(pd, tcg_env, dofs); 1285 1286 do_predtest1(pd, pg); 1287 } else { 1288 /* The operation and flags generation is large. The computation 1289 * of the flags depends on the original contents of the guarding 1290 * predicate. If the destination overwrites the guarding predicate, 1291 * then the easiest way to get this right is to save a copy. 1292 */ 1293 int tofs = gofs; 1294 if (a->rd == a->pg) { 1295 tofs = offsetof(CPUARMState, vfp.preg_tmp); 1296 tcg_gen_gvec_mov(0, tofs, gofs, psz, psz); 1297 } 1298 1299 tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op); 1300 do_predtest(s, dofs, tofs, psz / 8); 1301 } 1302 return true; 1303 } 1304 1305 static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1306 { 1307 tcg_gen_and_i64(pd, pn, pm); 1308 tcg_gen_and_i64(pd, pd, pg); 1309 } 1310 1311 static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1312 TCGv_vec pm, TCGv_vec pg) 1313 { 1314 tcg_gen_and_vec(vece, pd, pn, pm); 1315 tcg_gen_and_vec(vece, pd, pd, pg); 1316 } 1317 1318 static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a) 1319 { 1320 static const GVecGen4 op = { 1321 .fni8 = gen_and_pg_i64, 1322 .fniv = gen_and_pg_vec, 1323 .fno = gen_helper_sve_and_pppp, 1324 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1325 }; 1326 1327 if (!dc_isar_feature(aa64_sve, s)) { 1328 return false; 1329 } 1330 if (!a->s) { 1331 if (a->rn == a->rm) { 1332 if (a->pg == a->rn) { 1333 return do_mov_p(s, a->rd, a->rn); 1334 } 1335 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg); 1336 } else if (a->pg == a->rn || a->pg == a->rm) { 1337 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm); 1338 } 1339 } 1340 return do_pppp_flags(s, a, &op); 1341 } 1342 1343 static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1344 { 1345 tcg_gen_andc_i64(pd, pn, pm); 1346 tcg_gen_and_i64(pd, pd, pg); 1347 } 1348 1349 static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1350 TCGv_vec pm, TCGv_vec pg) 1351 { 1352 tcg_gen_andc_vec(vece, pd, pn, pm); 1353 tcg_gen_and_vec(vece, pd, pd, pg); 1354 } 1355 1356 static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a) 1357 { 1358 static const GVecGen4 op = { 1359 .fni8 = gen_bic_pg_i64, 1360 .fniv = gen_bic_pg_vec, 1361 .fno = gen_helper_sve_bic_pppp, 1362 
.prefer_i64 = TCG_TARGET_REG_BITS == 64, 1363 }; 1364 1365 if (!dc_isar_feature(aa64_sve, s)) { 1366 return false; 1367 } 1368 if (!a->s && a->pg == a->rn) { 1369 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm); 1370 } 1371 return do_pppp_flags(s, a, &op); 1372 } 1373 1374 static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1375 { 1376 tcg_gen_xor_i64(pd, pn, pm); 1377 tcg_gen_and_i64(pd, pd, pg); 1378 } 1379 1380 static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1381 TCGv_vec pm, TCGv_vec pg) 1382 { 1383 tcg_gen_xor_vec(vece, pd, pn, pm); 1384 tcg_gen_and_vec(vece, pd, pd, pg); 1385 } 1386 1387 static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a) 1388 { 1389 static const GVecGen4 op = { 1390 .fni8 = gen_eor_pg_i64, 1391 .fniv = gen_eor_pg_vec, 1392 .fno = gen_helper_sve_eor_pppp, 1393 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1394 }; 1395 1396 if (!dc_isar_feature(aa64_sve, s)) { 1397 return false; 1398 } 1399 /* Alias NOT (predicate) is EOR Pd.B, Pg/Z, Pn.B, Pg.B */ 1400 if (!a->s && a->pg == a->rm) { 1401 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn); 1402 } 1403 return do_pppp_flags(s, a, &op); 1404 } 1405 1406 static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a) 1407 { 1408 if (a->s || !dc_isar_feature(aa64_sve, s)) { 1409 return false; 1410 } 1411 if (sve_access_check(s)) { 1412 unsigned psz = pred_gvec_reg_size(s); 1413 tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd), 1414 pred_full_reg_offset(s, a->pg), 1415 pred_full_reg_offset(s, a->rn), 1416 pred_full_reg_offset(s, a->rm), psz, psz); 1417 } 1418 return true; 1419 } 1420 1421 static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1422 { 1423 tcg_gen_or_i64(pd, pn, pm); 1424 tcg_gen_and_i64(pd, pd, pg); 1425 } 1426 1427 static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1428 TCGv_vec pm, TCGv_vec pg) 1429 { 1430 tcg_gen_or_vec(vece, pd, pn, pm); 1431 tcg_gen_and_vec(vece, pd, pd, pg); 1432 } 1433 1434 static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a) 1435 { 1436 static const GVecGen4 op = { 1437 .fni8 = gen_orr_pg_i64, 1438 .fniv = gen_orr_pg_vec, 1439 .fno = gen_helper_sve_orr_pppp, 1440 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1441 }; 1442 1443 if (!dc_isar_feature(aa64_sve, s)) { 1444 return false; 1445 } 1446 if (!a->s && a->pg == a->rn && a->rn == a->rm) { 1447 return do_mov_p(s, a->rd, a->rn); 1448 } 1449 return do_pppp_flags(s, a, &op); 1450 } 1451 1452 static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1453 { 1454 tcg_gen_orc_i64(pd, pn, pm); 1455 tcg_gen_and_i64(pd, pd, pg); 1456 } 1457 1458 static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1459 TCGv_vec pm, TCGv_vec pg) 1460 { 1461 tcg_gen_orc_vec(vece, pd, pn, pm); 1462 tcg_gen_and_vec(vece, pd, pd, pg); 1463 } 1464 1465 static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a) 1466 { 1467 static const GVecGen4 op = { 1468 .fni8 = gen_orn_pg_i64, 1469 .fniv = gen_orn_pg_vec, 1470 .fno = gen_helper_sve_orn_pppp, 1471 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1472 }; 1473 1474 if (!dc_isar_feature(aa64_sve, s)) { 1475 return false; 1476 } 1477 return do_pppp_flags(s, a, &op); 1478 } 1479 1480 static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1481 { 1482 tcg_gen_or_i64(pd, pn, pm); 1483 tcg_gen_andc_i64(pd, pg, pd); 1484 } 1485 1486 static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1487 TCGv_vec pm, TCGv_vec pg) 1488 { 1489 
tcg_gen_or_vec(vece, pd, pn, pm); 1490 tcg_gen_andc_vec(vece, pd, pg, pd); 1491 } 1492 1493 static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a) 1494 { 1495 static const GVecGen4 op = { 1496 .fni8 = gen_nor_pg_i64, 1497 .fniv = gen_nor_pg_vec, 1498 .fno = gen_helper_sve_nor_pppp, 1499 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1500 }; 1501 1502 if (!dc_isar_feature(aa64_sve, s)) { 1503 return false; 1504 } 1505 return do_pppp_flags(s, a, &op); 1506 } 1507 1508 static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg) 1509 { 1510 tcg_gen_and_i64(pd, pn, pm); 1511 tcg_gen_andc_i64(pd, pg, pd); 1512 } 1513 1514 static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn, 1515 TCGv_vec pm, TCGv_vec pg) 1516 { 1517 tcg_gen_and_vec(vece, pd, pn, pm); 1518 tcg_gen_andc_vec(vece, pd, pg, pd); 1519 } 1520 1521 static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a) 1522 { 1523 static const GVecGen4 op = { 1524 .fni8 = gen_nand_pg_i64, 1525 .fniv = gen_nand_pg_vec, 1526 .fno = gen_helper_sve_nand_pppp, 1527 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 1528 }; 1529 1530 if (!dc_isar_feature(aa64_sve, s)) { 1531 return false; 1532 } 1533 return do_pppp_flags(s, a, &op); 1534 } 1535 1536 /* 1537 *** SVE Predicate Misc Group 1538 */ 1539 1540 static bool trans_PTEST(DisasContext *s, arg_PTEST *a) 1541 { 1542 if (!dc_isar_feature(aa64_sve, s)) { 1543 return false; 1544 } 1545 if (sve_access_check(s)) { 1546 int nofs = pred_full_reg_offset(s, a->rn); 1547 int gofs = pred_full_reg_offset(s, a->pg); 1548 int words = DIV_ROUND_UP(pred_full_reg_size(s), 8); 1549 1550 if (words == 1) { 1551 TCGv_i64 pn = tcg_temp_new_i64(); 1552 TCGv_i64 pg = tcg_temp_new_i64(); 1553 1554 tcg_gen_ld_i64(pn, tcg_env, nofs); 1555 tcg_gen_ld_i64(pg, tcg_env, gofs); 1556 do_predtest1(pn, pg); 1557 } else { 1558 do_predtest(s, nofs, gofs, words); 1559 } 1560 } 1561 return true; 1562 } 1563 1564 /* See the ARM pseudocode DecodePredCount. */ 1565 static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz) 1566 { 1567 unsigned elements = fullsz >> esz; 1568 unsigned bound; 1569 1570 switch (pattern) { 1571 case 0x0: /* POW2 */ 1572 return pow2floor(elements); 1573 case 0x1: /* VL1 */ 1574 case 0x2: /* VL2 */ 1575 case 0x3: /* VL3 */ 1576 case 0x4: /* VL4 */ 1577 case 0x5: /* VL5 */ 1578 case 0x6: /* VL6 */ 1579 case 0x7: /* VL7 */ 1580 case 0x8: /* VL8 */ 1581 bound = pattern; 1582 break; 1583 case 0x9: /* VL16 */ 1584 case 0xa: /* VL32 */ 1585 case 0xb: /* VL64 */ 1586 case 0xc: /* VL128 */ 1587 case 0xd: /* VL256 */ 1588 bound = 16 << (pattern - 9); 1589 break; 1590 case 0x1d: /* MUL4 */ 1591 return elements - elements % 4; 1592 case 0x1e: /* MUL3 */ 1593 return elements - elements % 3; 1594 case 0x1f: /* ALL */ 1595 return elements; 1596 default: /* #uimm5 */ 1597 return 0; 1598 } 1599 return elements >= bound ? bound : 0; 1600 } 1601 1602 /* This handles all of the predicate initialization instructions, 1603 * PTRUE, PFALSE, SETFFR. For PFALSE, we will have set PAT == 32 1604 * so that decode_pred_count returns 0. For SETFFR, we will have 1605 * set RD == 16 == FFR. 
1606 */ 1607 static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) 1608 { 1609 if (!sve_access_check(s)) { 1610 return true; 1611 } 1612 1613 unsigned fullsz = vec_full_reg_size(s); 1614 unsigned ofs = pred_full_reg_offset(s, rd); 1615 unsigned numelem, setsz, i; 1616 uint64_t word, lastword; 1617 TCGv_i64 t; 1618 1619 numelem = decode_pred_count(fullsz, pat, esz); 1620 1621 /* Determine what we must store into each bit, and how many. */ 1622 if (numelem == 0) { 1623 lastword = word = 0; 1624 setsz = fullsz; 1625 } else { 1626 setsz = numelem << esz; 1627 lastword = word = pred_esz_masks[esz]; 1628 if (setsz % 64) { 1629 lastword &= MAKE_64BIT_MASK(0, setsz % 64); 1630 } 1631 } 1632 1633 t = tcg_temp_new_i64(); 1634 if (fullsz <= 64) { 1635 tcg_gen_movi_i64(t, lastword); 1636 tcg_gen_st_i64(t, tcg_env, ofs); 1637 goto done; 1638 } 1639 1640 if (word == lastword) { 1641 unsigned maxsz = size_for_gvec(fullsz / 8); 1642 unsigned oprsz = size_for_gvec(setsz / 8); 1643 1644 if (oprsz * 8 == setsz) { 1645 tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word); 1646 goto done; 1647 } 1648 } 1649 1650 setsz /= 8; 1651 fullsz /= 8; 1652 1653 tcg_gen_movi_i64(t, word); 1654 for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) { 1655 tcg_gen_st_i64(t, tcg_env, ofs + i); 1656 } 1657 if (lastword != word) { 1658 tcg_gen_movi_i64(t, lastword); 1659 tcg_gen_st_i64(t, tcg_env, ofs + i); 1660 i += 8; 1661 } 1662 if (i < fullsz) { 1663 tcg_gen_movi_i64(t, 0); 1664 for (; i < fullsz; i += 8) { 1665 tcg_gen_st_i64(t, tcg_env, ofs + i); 1666 } 1667 } 1668 1669 done: 1670 /* PTRUES */ 1671 if (setflag) { 1672 tcg_gen_movi_i32(cpu_NF, -(word != 0)); 1673 tcg_gen_movi_i32(cpu_CF, word == 0); 1674 tcg_gen_movi_i32(cpu_VF, 0); 1675 tcg_gen_mov_i32(cpu_ZF, cpu_NF); 1676 } 1677 return true; 1678 } 1679 1680 TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s) 1681 1682 /* Note pat == 31 is #all, to set all elements. */ 1683 TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve, 1684 do_predset, 0, FFR_PRED_NUM, 31, false) 1685 1686 /* Note pat == 32 is #unimp, to set no elements. */ 1687 TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false) 1688 1689 static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) 1690 { 1691 /* The path through do_pppp_flags is complicated enough to want to avoid 1692 * duplication. Frob the arguments into the form of a predicated AND. 
1693 */ 1694 arg_rprr_s alt_a = { 1695 .rd = a->rd, .pg = a->pg, .s = a->s, 1696 .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, 1697 }; 1698 1699 s->is_nonstreaming = true; 1700 return trans_AND_pppp(s, &alt_a); 1701 } 1702 1703 TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) 1704 TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) 1705 1706 static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, 1707 void (*gen_fn)(TCGv_i32, TCGv_ptr, 1708 TCGv_ptr, TCGv_i32)) 1709 { 1710 if (!sve_access_check(s)) { 1711 return true; 1712 } 1713 1714 TCGv_ptr t_pd = tcg_temp_new_ptr(); 1715 TCGv_ptr t_pg = tcg_temp_new_ptr(); 1716 TCGv_i32 t; 1717 unsigned desc = 0; 1718 1719 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s)); 1720 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 1721 1722 tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, a->rd)); 1723 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->rn)); 1724 t = tcg_temp_new_i32(); 1725 1726 gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc)); 1727 1728 do_pred_flags(t); 1729 return true; 1730 } 1731 1732 TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst) 1733 TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext) 1734 1735 /* 1736 *** SVE Element Count Group 1737 */ 1738 1739 /* Perform an inline saturating addition of a 32-bit value within 1740 * a 64-bit register. The second operand is known to be positive, 1741 * which halves the comparisons we must perform to bound the result. 1742 */ 1743 static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) 1744 { 1745 int64_t ibound; 1746 1747 /* Use normal 64-bit arithmetic to detect 32-bit overflow. */ 1748 if (u) { 1749 tcg_gen_ext32u_i64(reg, reg); 1750 } else { 1751 tcg_gen_ext32s_i64(reg, reg); 1752 } 1753 if (d) { 1754 tcg_gen_sub_i64(reg, reg, val); 1755 ibound = (u ? 0 : INT32_MIN); 1756 tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound)); 1757 } else { 1758 tcg_gen_add_i64(reg, reg, val); 1759 ibound = (u ? UINT32_MAX : INT32_MAX); 1760 tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound)); 1761 } 1762 } 1763 1764 /* Similarly with 64-bit values. */ 1765 static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) 1766 { 1767 TCGv_i64 t0 = tcg_temp_new_i64(); 1768 TCGv_i64 t2; 1769 1770 if (u) { 1771 if (d) { 1772 tcg_gen_sub_i64(t0, reg, val); 1773 t2 = tcg_constant_i64(0); 1774 tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0); 1775 } else { 1776 tcg_gen_add_i64(t0, reg, val); 1777 t2 = tcg_constant_i64(-1); 1778 tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0); 1779 } 1780 } else { 1781 TCGv_i64 t1 = tcg_temp_new_i64(); 1782 if (d) { 1783 /* Detect signed overflow for subtraction. */ 1784 tcg_gen_xor_i64(t0, reg, val); 1785 tcg_gen_sub_i64(t1, reg, val); 1786 tcg_gen_xor_i64(reg, reg, t1); 1787 tcg_gen_and_i64(t0, t0, reg); 1788 1789 /* Bound the result. */ 1790 tcg_gen_movi_i64(reg, INT64_MIN); 1791 t2 = tcg_constant_i64(0); 1792 tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1); 1793 } else { 1794 /* Detect signed overflow for addition. */ 1795 tcg_gen_xor_i64(t0, reg, val); 1796 tcg_gen_add_i64(reg, reg, val); 1797 tcg_gen_xor_i64(t1, reg, val); 1798 tcg_gen_andc_i64(t0, t1, t0); 1799 1800 /* Bound the result. */ 1801 tcg_gen_movi_i64(t1, INT64_MAX); 1802 t2 = tcg_constant_i64(0); 1803 tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg); 1804 } 1805 } 1806 } 1807 1808 /* Similarly with a vector and a scalar operand. 
*/ 1809 static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn, 1810 TCGv_i64 val, bool u, bool d) 1811 { 1812 unsigned vsz = vec_full_reg_size(s); 1813 TCGv_ptr dptr, nptr; 1814 TCGv_i32 t32, desc; 1815 TCGv_i64 t64; 1816 1817 dptr = tcg_temp_new_ptr(); 1818 nptr = tcg_temp_new_ptr(); 1819 tcg_gen_addi_ptr(dptr, tcg_env, vec_full_reg_offset(s, rd)); 1820 tcg_gen_addi_ptr(nptr, tcg_env, vec_full_reg_offset(s, rn)); 1821 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 1822 1823 switch (esz) { 1824 case MO_8: 1825 t32 = tcg_temp_new_i32(); 1826 tcg_gen_extrl_i64_i32(t32, val); 1827 if (d) { 1828 tcg_gen_neg_i32(t32, t32); 1829 } 1830 if (u) { 1831 gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc); 1832 } else { 1833 gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc); 1834 } 1835 break; 1836 1837 case MO_16: 1838 t32 = tcg_temp_new_i32(); 1839 tcg_gen_extrl_i64_i32(t32, val); 1840 if (d) { 1841 tcg_gen_neg_i32(t32, t32); 1842 } 1843 if (u) { 1844 gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc); 1845 } else { 1846 gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc); 1847 } 1848 break; 1849 1850 case MO_32: 1851 t64 = tcg_temp_new_i64(); 1852 if (d) { 1853 tcg_gen_neg_i64(t64, val); 1854 } else { 1855 tcg_gen_mov_i64(t64, val); 1856 } 1857 if (u) { 1858 gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc); 1859 } else { 1860 gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc); 1861 } 1862 break; 1863 1864 case MO_64: 1865 if (u) { 1866 if (d) { 1867 gen_helper_sve_uqsubi_d(dptr, nptr, val, desc); 1868 } else { 1869 gen_helper_sve_uqaddi_d(dptr, nptr, val, desc); 1870 } 1871 } else if (d) { 1872 t64 = tcg_temp_new_i64(); 1873 tcg_gen_neg_i64(t64, val); 1874 gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc); 1875 } else { 1876 gen_helper_sve_sqaddi_d(dptr, nptr, val, desc); 1877 } 1878 break; 1879 1880 default: 1881 g_assert_not_reached(); 1882 } 1883 } 1884 1885 static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a) 1886 { 1887 if (!dc_isar_feature(aa64_sve, s)) { 1888 return false; 1889 } 1890 if (sve_access_check(s)) { 1891 unsigned fullsz = vec_full_reg_size(s); 1892 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1893 tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm); 1894 } 1895 return true; 1896 } 1897 1898 static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a) 1899 { 1900 if (!dc_isar_feature(aa64_sve, s)) { 1901 return false; 1902 } 1903 if (sve_access_check(s)) { 1904 unsigned fullsz = vec_full_reg_size(s); 1905 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1906 int inc = numelem * a->imm * (a->d ? -1 : 1); 1907 TCGv_i64 reg = cpu_reg(s, a->rd); 1908 1909 tcg_gen_addi_i64(reg, reg, inc); 1910 } 1911 return true; 1912 } 1913 1914 static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a) 1915 { 1916 if (!dc_isar_feature(aa64_sve, s)) { 1917 return false; 1918 } 1919 if (!sve_access_check(s)) { 1920 return true; 1921 } 1922 1923 unsigned fullsz = vec_full_reg_size(s); 1924 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1925 int inc = numelem * a->imm; 1926 TCGv_i64 reg = cpu_reg(s, a->rd); 1927 1928 /* Use normal 64-bit arithmetic to detect 32-bit overflow. 
*/ 1929 if (inc == 0) { 1930 if (a->u) { 1931 tcg_gen_ext32u_i64(reg, reg); 1932 } else { 1933 tcg_gen_ext32s_i64(reg, reg); 1934 } 1935 } else { 1936 do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d); 1937 } 1938 return true; 1939 } 1940 1941 static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a) 1942 { 1943 if (!dc_isar_feature(aa64_sve, s)) { 1944 return false; 1945 } 1946 if (!sve_access_check(s)) { 1947 return true; 1948 } 1949 1950 unsigned fullsz = vec_full_reg_size(s); 1951 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1952 int inc = numelem * a->imm; 1953 TCGv_i64 reg = cpu_reg(s, a->rd); 1954 1955 if (inc != 0) { 1956 do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d); 1957 } 1958 return true; 1959 } 1960 1961 static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a) 1962 { 1963 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 1964 return false; 1965 } 1966 1967 unsigned fullsz = vec_full_reg_size(s); 1968 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1969 int inc = numelem * a->imm; 1970 1971 if (inc != 0) { 1972 if (sve_access_check(s)) { 1973 tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd), 1974 vec_full_reg_offset(s, a->rn), 1975 tcg_constant_i64(a->d ? -inc : inc), 1976 fullsz, fullsz); 1977 } 1978 } else { 1979 do_mov_z(s, a->rd, a->rn); 1980 } 1981 return true; 1982 } 1983 1984 static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a) 1985 { 1986 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 1987 return false; 1988 } 1989 1990 unsigned fullsz = vec_full_reg_size(s); 1991 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz); 1992 int inc = numelem * a->imm; 1993 1994 if (inc != 0) { 1995 if (sve_access_check(s)) { 1996 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, 1997 tcg_constant_i64(inc), a->u, a->d); 1998 } 1999 } else { 2000 do_mov_z(s, a->rd, a->rn); 2001 } 2002 return true; 2003 } 2004 2005 /* 2006 *** SVE Bitwise Immediate Group 2007 */ 2008 2009 static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn) 2010 { 2011 uint64_t imm; 2012 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), 2013 extract32(a->dbm, 0, 6), 2014 extract32(a->dbm, 6, 6))) { 2015 return false; 2016 } 2017 return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm); 2018 } 2019 2020 TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi) 2021 TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori) 2022 TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori) 2023 2024 static bool trans_DUPM(DisasContext *s, arg_DUPM *a) 2025 { 2026 uint64_t imm; 2027 2028 if (!dc_isar_feature(aa64_sve, s)) { 2029 return false; 2030 } 2031 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), 2032 extract32(a->dbm, 0, 6), 2033 extract32(a->dbm, 6, 6))) { 2034 return false; 2035 } 2036 if (sve_access_check(s)) { 2037 do_dupi_z(s, a->rd, imm); 2038 } 2039 return true; 2040 } 2041 2042 /* 2043 *** SVE Integer Wide Immediate - Predicated Group 2044 */ 2045 2046 /* Implement all merging copies. This is used for CPY (immediate), 2047 * FCPY, CPY (scalar), CPY (SIMD&FP scalar). 
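 * "Merging" means that elements whose governing predicate bit is set
 * receive VAL (truncated to the element size), while inactive elements
 * are copied through unchanged from Zn.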
2048 */ 2049 static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg, 2050 TCGv_i64 val) 2051 { 2052 typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); 2053 static gen_cpy * const fns[4] = { 2054 gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h, 2055 gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d, 2056 }; 2057 unsigned vsz = vec_full_reg_size(s); 2058 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 2059 TCGv_ptr t_zd = tcg_temp_new_ptr(); 2060 TCGv_ptr t_zn = tcg_temp_new_ptr(); 2061 TCGv_ptr t_pg = tcg_temp_new_ptr(); 2062 2063 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, rd)); 2064 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, rn)); 2065 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 2066 2067 fns[esz](t_zd, t_zn, t_pg, val, desc); 2068 } 2069 2070 static bool trans_FCPY(DisasContext *s, arg_FCPY *a) 2071 { 2072 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2073 return false; 2074 } 2075 if (sve_access_check(s)) { 2076 /* Decode the VFP immediate. */ 2077 uint64_t imm = vfp_expand_imm(a->esz, a->imm); 2078 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm)); 2079 } 2080 return true; 2081 } 2082 2083 static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a) 2084 { 2085 if (!dc_isar_feature(aa64_sve, s)) { 2086 return false; 2087 } 2088 if (sve_access_check(s)) { 2089 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm)); 2090 } 2091 return true; 2092 } 2093 2094 static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a) 2095 { 2096 static gen_helper_gvec_2i * const fns[4] = { 2097 gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h, 2098 gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d, 2099 }; 2100 2101 if (!dc_isar_feature(aa64_sve, s)) { 2102 return false; 2103 } 2104 if (sve_access_check(s)) { 2105 unsigned vsz = vec_full_reg_size(s); 2106 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), 2107 pred_full_reg_offset(s, a->pg), 2108 tcg_constant_i64(a->imm), 2109 vsz, vsz, 0, fns[a->esz]); 2110 } 2111 return true; 2112 } 2113 2114 /* 2115 *** SVE Permute Extract Group 2116 */ 2117 2118 static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm) 2119 { 2120 if (!sve_access_check(s)) { 2121 return true; 2122 } 2123 2124 unsigned vsz = vec_full_reg_size(s); 2125 unsigned n_ofs = imm >= vsz ? 0 : imm; 2126 unsigned n_siz = vsz - n_ofs; 2127 unsigned d = vec_full_reg_offset(s, rd); 2128 unsigned n = vec_full_reg_offset(s, rn); 2129 unsigned m = vec_full_reg_offset(s, rm); 2130 2131 /* Use host vector move insns if we have appropriate sizes 2132 * and no unfortunate overlap. 
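 * EXT concatenates the two sources: e.g. with a 32-byte vector and
 * imm == 8 (hypothetical values), Zd receives bytes 8..31 of Zn
 * followed by bytes 0..7 of Zm.  The two-move fast path is only usable
 * when both pieces are valid gvec sizes and the moves cannot overwrite
 * source bytes before they are read.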
2133 */ 2134 if (m != d 2135 && n_ofs == size_for_gvec(n_ofs) 2136 && n_siz == size_for_gvec(n_siz) 2137 && (d != n || n_siz <= n_ofs)) { 2138 tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz); 2139 if (n_ofs != 0) { 2140 tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs); 2141 } 2142 } else { 2143 tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext); 2144 } 2145 return true; 2146 } 2147 2148 TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm) 2149 TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm) 2150 2151 /* 2152 *** SVE Permute - Unpredicated Group 2153 */ 2154 2155 static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a) 2156 { 2157 if (!dc_isar_feature(aa64_sve, s)) { 2158 return false; 2159 } 2160 if (sve_access_check(s)) { 2161 unsigned vsz = vec_full_reg_size(s); 2162 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd), 2163 vsz, vsz, cpu_reg_sp(s, a->rn)); 2164 } 2165 return true; 2166 } 2167 2168 static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a) 2169 { 2170 if (!dc_isar_feature(aa64_sve, s)) { 2171 return false; 2172 } 2173 if ((a->imm & 0x1f) == 0) { 2174 return false; 2175 } 2176 if (sve_access_check(s)) { 2177 unsigned vsz = vec_full_reg_size(s); 2178 unsigned dofs = vec_full_reg_offset(s, a->rd); 2179 unsigned esz, index; 2180 2181 esz = ctz32(a->imm); 2182 index = a->imm >> (esz + 1); 2183 2184 if ((index << esz) < vsz) { 2185 unsigned nofs = vec_reg_offset(s, a->rn, index, esz); 2186 tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz); 2187 } else { 2188 /* 2189 * While dup_mem handles 128-bit elements, dup_imm does not. 2190 * Thankfully element size doesn't matter for splatting zero. 2191 */ 2192 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0); 2193 } 2194 } 2195 return true; 2196 } 2197 2198 static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val) 2199 { 2200 typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32); 2201 static gen_insr * const fns[4] = { 2202 gen_helper_sve_insr_b, gen_helper_sve_insr_h, 2203 gen_helper_sve_insr_s, gen_helper_sve_insr_d, 2204 }; 2205 unsigned vsz = vec_full_reg_size(s); 2206 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 2207 TCGv_ptr t_zd = tcg_temp_new_ptr(); 2208 TCGv_ptr t_zn = tcg_temp_new_ptr(); 2209 2210 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, a->rd)); 2211 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn)); 2212 2213 fns[a->esz](t_zd, t_zn, val, desc); 2214 } 2215 2216 static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a) 2217 { 2218 if (!dc_isar_feature(aa64_sve, s)) { 2219 return false; 2220 } 2221 if (sve_access_check(s)) { 2222 TCGv_i64 t = tcg_temp_new_i64(); 2223 tcg_gen_ld_i64(t, tcg_env, vec_reg_offset(s, a->rm, 0, MO_64)); 2224 do_insr_i64(s, a, t); 2225 } 2226 return true; 2227 } 2228 2229 static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a) 2230 { 2231 if (!dc_isar_feature(aa64_sve, s)) { 2232 return false; 2233 } 2234 if (sve_access_check(s)) { 2235 do_insr_i64(s, a, cpu_reg(s, a->rm)); 2236 } 2237 return true; 2238 } 2239 2240 static gen_helper_gvec_2 * const rev_fns[4] = { 2241 gen_helper_sve_rev_b, gen_helper_sve_rev_h, 2242 gen_helper_sve_rev_s, gen_helper_sve_rev_d 2243 }; 2244 TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0) 2245 2246 static gen_helper_gvec_3 * const sve_tbl_fns[4] = { 2247 gen_helper_sve_tbl_b, gen_helper_sve_tbl_h, 2248 gen_helper_sve_tbl_s, gen_helper_sve_tbl_d 2249 }; 2250 TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 
0) 2251 2252 static gen_helper_gvec_4 * const sve2_tbl_fns[4] = { 2253 gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h, 2254 gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d 2255 }; 2256 TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz], 2257 a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0) 2258 2259 static gen_helper_gvec_3 * const tbx_fns[4] = { 2260 gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h, 2261 gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d 2262 }; 2263 TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0) 2264 2265 static bool trans_UNPK(DisasContext *s, arg_UNPK *a) 2266 { 2267 static gen_helper_gvec_2 * const fns[4][2] = { 2268 { NULL, NULL }, 2269 { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h }, 2270 { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s }, 2271 { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d }, 2272 }; 2273 2274 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 2275 return false; 2276 } 2277 if (sve_access_check(s)) { 2278 unsigned vsz = vec_full_reg_size(s); 2279 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd), 2280 vec_full_reg_offset(s, a->rn) 2281 + (a->h ? vsz / 2 : 0), 2282 vsz, vsz, 0, fns[a->esz][a->u]); 2283 } 2284 return true; 2285 } 2286 2287 /* 2288 *** SVE Permute - Predicates Group 2289 */ 2290 2291 static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd, 2292 gen_helper_gvec_3 *fn) 2293 { 2294 if (!sve_access_check(s)) { 2295 return true; 2296 } 2297 2298 unsigned vsz = pred_full_reg_size(s); 2299 2300 TCGv_ptr t_d = tcg_temp_new_ptr(); 2301 TCGv_ptr t_n = tcg_temp_new_ptr(); 2302 TCGv_ptr t_m = tcg_temp_new_ptr(); 2303 uint32_t desc = 0; 2304 2305 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); 2306 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 2307 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); 2308 2309 tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd)); 2310 tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn)); 2311 tcg_gen_addi_ptr(t_m, tcg_env, pred_full_reg_offset(s, a->rm)); 2312 2313 fn(t_d, t_n, t_m, tcg_constant_i32(desc)); 2314 return true; 2315 } 2316 2317 static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd, 2318 gen_helper_gvec_2 *fn) 2319 { 2320 if (!sve_access_check(s)) { 2321 return true; 2322 } 2323 2324 unsigned vsz = pred_full_reg_size(s); 2325 TCGv_ptr t_d = tcg_temp_new_ptr(); 2326 TCGv_ptr t_n = tcg_temp_new_ptr(); 2327 uint32_t desc = 0; 2328 2329 tcg_gen_addi_ptr(t_d, tcg_env, pred_full_reg_offset(s, a->rd)); 2330 tcg_gen_addi_ptr(t_n, tcg_env, pred_full_reg_offset(s, a->rn)); 2331 2332 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz); 2333 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 2334 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd); 2335 2336 fn(t_d, t_n, tcg_constant_i32(desc)); 2337 return true; 2338 } 2339 2340 TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p) 2341 TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p) 2342 TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p) 2343 TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p) 2344 TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p) 2345 TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p) 2346 2347 TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p) 2348 TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p) 2349 TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p) 2350 2351 /* 2352 *** SVE 
Permute - Interleaving Group 2353 */ 2354 2355 static gen_helper_gvec_3 * const zip_fns[4] = { 2356 gen_helper_sve_zip_b, gen_helper_sve_zip_h, 2357 gen_helper_sve_zip_s, gen_helper_sve_zip_d, 2358 }; 2359 TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2360 zip_fns[a->esz], a, 0) 2361 TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2362 zip_fns[a->esz], a, vec_full_reg_size(s) / 2) 2363 2364 TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2365 gen_helper_sve2_zip_q, a, 0) 2366 TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2367 gen_helper_sve2_zip_q, a, 2368 QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2) 2369 2370 static gen_helper_gvec_3 * const uzp_fns[4] = { 2371 gen_helper_sve_uzp_b, gen_helper_sve_uzp_h, 2372 gen_helper_sve_uzp_s, gen_helper_sve_uzp_d, 2373 }; 2374 2375 TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2376 uzp_fns[a->esz], a, 0) 2377 TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2378 uzp_fns[a->esz], a, 1 << a->esz) 2379 2380 TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2381 gen_helper_sve2_uzp_q, a, 0) 2382 TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2383 gen_helper_sve2_uzp_q, a, 16) 2384 2385 static gen_helper_gvec_3 * const trn_fns[4] = { 2386 gen_helper_sve_trn_b, gen_helper_sve_trn_h, 2387 gen_helper_sve_trn_s, gen_helper_sve_trn_d, 2388 }; 2389 2390 TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz, 2391 trn_fns[a->esz], a, 0) 2392 TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz, 2393 trn_fns[a->esz], a, 1 << a->esz) 2394 2395 TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2396 gen_helper_sve2_trn_q, a, 0) 2397 TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, 2398 gen_helper_sve2_trn_q, a, 16) 2399 2400 /* 2401 *** SVE Permute Vector - Predicated Group 2402 */ 2403 2404 static gen_helper_gvec_3 * const compact_fns[4] = { 2405 NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d 2406 }; 2407 TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, 2408 compact_fns[a->esz], a, 0) 2409 2410 /* Call the helper that computes the ARM LastActiveElement pseudocode 2411 * function, scaled by the element size. This includes the not found 2412 * indication; e.g. not found for esz=3 is -8. 2413 */ 2414 static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg) 2415 { 2416 /* Predicate sizes may be smaller and cannot use simd_desc. We cannot 2417 * round up, as we do elsewhere, because we need the exact size. 2418 */ 2419 TCGv_ptr t_p = tcg_temp_new_ptr(); 2420 unsigned desc = 0; 2421 2422 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s)); 2423 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); 2424 2425 tcg_gen_addi_ptr(t_p, tcg_env, pred_full_reg_offset(s, pg)); 2426 2427 gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc)); 2428 } 2429 2430 /* Increment LAST to the offset of the next element in the vector, 2431 * wrapping around to 0. 2432 */ 2433 static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz) 2434 { 2435 unsigned vsz = vec_full_reg_size(s); 2436 2437 tcg_gen_addi_i32(last, last, 1 << esz); 2438 if (is_power_of_2(vsz)) { 2439 tcg_gen_andi_i32(last, last, vsz - 1); 2440 } else { 2441 TCGv_i32 max = tcg_constant_i32(vsz); 2442 TCGv_i32 zero = tcg_constant_i32(0); 2443 tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last); 2444 } 2445 } 2446 2447 /* If LAST < 0, set LAST to the offset of the last element in the vector. 
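 * A negative LAST is the "no active element" marker from
 * find_last_active(), i.e. -(1 << esz); both paths below map it to
 * vsz - (1 << esz), the offset of the final element.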
*/ 2448 static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz) 2449 { 2450 unsigned vsz = vec_full_reg_size(s); 2451 2452 if (is_power_of_2(vsz)) { 2453 tcg_gen_andi_i32(last, last, vsz - 1); 2454 } else { 2455 TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz)); 2456 TCGv_i32 zero = tcg_constant_i32(0); 2457 tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last); 2458 } 2459 } 2460 2461 /* Load an unsigned element of ESZ from BASE+OFS. */ 2462 static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz) 2463 { 2464 TCGv_i64 r = tcg_temp_new_i64(); 2465 2466 switch (esz) { 2467 case 0: 2468 tcg_gen_ld8u_i64(r, base, ofs); 2469 break; 2470 case 1: 2471 tcg_gen_ld16u_i64(r, base, ofs); 2472 break; 2473 case 2: 2474 tcg_gen_ld32u_i64(r, base, ofs); 2475 break; 2476 case 3: 2477 tcg_gen_ld_i64(r, base, ofs); 2478 break; 2479 default: 2480 g_assert_not_reached(); 2481 } 2482 return r; 2483 } 2484 2485 /* Load an unsigned element of ESZ from RM[LAST]. */ 2486 static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last, 2487 int rm, int esz) 2488 { 2489 TCGv_ptr p = tcg_temp_new_ptr(); 2490 2491 /* Convert offset into vector into offset into ENV. 2492 * The final adjustment for the vector register base 2493 * is added via constant offset to the load. 2494 */ 2495 #if HOST_BIG_ENDIAN 2496 /* Adjust for element ordering. See vec_reg_offset. */ 2497 if (esz < 3) { 2498 tcg_gen_xori_i32(last, last, 8 - (1 << esz)); 2499 } 2500 #endif 2501 tcg_gen_ext_i32_ptr(p, last); 2502 tcg_gen_add_ptr(p, p, tcg_env); 2503 2504 return load_esz(p, vec_full_reg_offset(s, rm), esz); 2505 } 2506 2507 /* Compute CLAST for a Zreg. */ 2508 static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before) 2509 { 2510 TCGv_i32 last; 2511 TCGLabel *over; 2512 TCGv_i64 ele; 2513 unsigned vsz, esz = a->esz; 2514 2515 if (!sve_access_check(s)) { 2516 return true; 2517 } 2518 2519 last = tcg_temp_new_i32(); 2520 over = gen_new_label(); 2521 2522 find_last_active(s, last, esz, a->pg); 2523 2524 /* There is of course no movcond for a 2048-bit vector, 2525 * so we must branch over the actual store. 2526 */ 2527 tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over); 2528 2529 if (!before) { 2530 incr_last_active(s, last, esz); 2531 } 2532 2533 ele = load_last_active(s, last, a->rm, esz); 2534 2535 vsz = vec_full_reg_size(s); 2536 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele); 2537 2538 /* If this insn used MOVPRFX, we may need a second move. */ 2539 if (a->rd != a->rn) { 2540 TCGLabel *done = gen_new_label(); 2541 tcg_gen_br(done); 2542 2543 gen_set_label(over); 2544 do_mov_z(s, a->rd, a->rn); 2545 2546 gen_set_label(done); 2547 } else { 2548 gen_set_label(over); 2549 } 2550 return true; 2551 } 2552 2553 TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false) 2554 TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true) 2555 2556 /* Compute CLAST for a scalar. */ 2557 static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm, 2558 bool before, TCGv_i64 reg_val) 2559 { 2560 TCGv_i32 last = tcg_temp_new_i32(); 2561 TCGv_i64 ele, cmp; 2562 2563 find_last_active(s, last, esz, pg); 2564 2565 /* Extend the original value of last prior to incrementing. 
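 * The sign of this pre-increment value distinguishes "no active element"
 * from a valid element: the movcond below keeps the old REG_VAL when
 * cmp is negative.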
*/ 2566 cmp = tcg_temp_new_i64(); 2567 tcg_gen_ext_i32_i64(cmp, last); 2568 2569 if (!before) { 2570 incr_last_active(s, last, esz); 2571 } 2572 2573 /* The conceit here is that while last < 0 indicates not found, after 2574 * adjusting for tcg_env->vfp.zregs[rm], it is still a valid address 2575 * from which we can load garbage. We then discard the garbage with 2576 * a conditional move. 2577 */ 2578 ele = load_last_active(s, last, rm, esz); 2579 2580 tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0), 2581 ele, reg_val); 2582 } 2583 2584 /* Compute CLAST for a Vreg. */ 2585 static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before) 2586 { 2587 if (sve_access_check(s)) { 2588 int esz = a->esz; 2589 int ofs = vec_reg_offset(s, a->rd, 0, esz); 2590 TCGv_i64 reg = load_esz(tcg_env, ofs, esz); 2591 2592 do_clast_scalar(s, esz, a->pg, a->rn, before, reg); 2593 write_fp_dreg(s, a->rd, reg); 2594 } 2595 return true; 2596 } 2597 2598 TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false) 2599 TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true) 2600 2601 /* Compute CLAST for a Xreg. */ 2602 static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before) 2603 { 2604 TCGv_i64 reg; 2605 2606 if (!sve_access_check(s)) { 2607 return true; 2608 } 2609 2610 reg = cpu_reg(s, a->rd); 2611 switch (a->esz) { 2612 case 0: 2613 tcg_gen_ext8u_i64(reg, reg); 2614 break; 2615 case 1: 2616 tcg_gen_ext16u_i64(reg, reg); 2617 break; 2618 case 2: 2619 tcg_gen_ext32u_i64(reg, reg); 2620 break; 2621 case 3: 2622 break; 2623 default: 2624 g_assert_not_reached(); 2625 } 2626 2627 do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg); 2628 return true; 2629 } 2630 2631 TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false) 2632 TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true) 2633 2634 /* Compute LAST for a scalar. */ 2635 static TCGv_i64 do_last_scalar(DisasContext *s, int esz, 2636 int pg, int rm, bool before) 2637 { 2638 TCGv_i32 last = tcg_temp_new_i32(); 2639 2640 find_last_active(s, last, esz, pg); 2641 if (before) { 2642 wrap_last_active(s, last, esz); 2643 } else { 2644 incr_last_active(s, last, esz); 2645 } 2646 2647 return load_last_active(s, last, rm, esz); 2648 } 2649 2650 /* Compute LAST for a Vreg. */ 2651 static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before) 2652 { 2653 if (sve_access_check(s)) { 2654 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); 2655 write_fp_dreg(s, a->rd, val); 2656 } 2657 return true; 2658 } 2659 2660 TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false) 2661 TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true) 2662 2663 /* Compute LAST for a Xreg. 
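 * As with the Vreg form, the value comes from do_last_scalar() above;
 * note that with no active elements the wrap/increment of the
 * -(1 << esz) marker makes LASTB return the final element and LASTA
 * element 0.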
*/ 2664 static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before) 2665 { 2666 if (sve_access_check(s)) { 2667 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before); 2668 tcg_gen_mov_i64(cpu_reg(s, a->rd), val); 2669 } 2670 return true; 2671 } 2672 2673 TRANS_FEAT(LASTA_r, aa64_sve, do_last_general, a, false) 2674 TRANS_FEAT(LASTB_r, aa64_sve, do_last_general, a, true) 2675 2676 static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a) 2677 { 2678 if (!dc_isar_feature(aa64_sve, s)) { 2679 return false; 2680 } 2681 if (sve_access_check(s)) { 2682 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn)); 2683 } 2684 return true; 2685 } 2686 2687 static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a) 2688 { 2689 if (!dc_isar_feature(aa64_sve, s)) { 2690 return false; 2691 } 2692 if (sve_access_check(s)) { 2693 int ofs = vec_reg_offset(s, a->rn, 0, a->esz); 2694 TCGv_i64 t = load_esz(tcg_env, ofs, a->esz); 2695 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t); 2696 } 2697 return true; 2698 } 2699 2700 static gen_helper_gvec_3 * const revb_fns[4] = { 2701 NULL, gen_helper_sve_revb_h, 2702 gen_helper_sve_revb_s, gen_helper_sve_revb_d, 2703 }; 2704 TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0) 2705 2706 static gen_helper_gvec_3 * const revh_fns[4] = { 2707 NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d, 2708 }; 2709 TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0) 2710 2711 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz, 2712 a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0) 2713 2714 TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0) 2715 2716 TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz, 2717 gen_helper_sve_splice, a, a->esz) 2718 2719 TRANS_FEAT(SPLICE_sve2, aa64_sve2, gen_gvec_ool_zzzp, gen_helper_sve_splice, 2720 a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz) 2721 2722 /* 2723 *** SVE Integer Compare - Vectors Group 2724 */ 2725 2726 static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a, 2727 gen_helper_gvec_flags_4 *gen_fn) 2728 { 2729 TCGv_ptr pd, zn, zm, pg; 2730 unsigned vsz; 2731 TCGv_i32 t; 2732 2733 if (gen_fn == NULL) { 2734 return false; 2735 } 2736 if (!sve_access_check(s)) { 2737 return true; 2738 } 2739 2740 vsz = vec_full_reg_size(s); 2741 t = tcg_temp_new_i32(); 2742 pd = tcg_temp_new_ptr(); 2743 zn = tcg_temp_new_ptr(); 2744 zm = tcg_temp_new_ptr(); 2745 pg = tcg_temp_new_ptr(); 2746 2747 tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd)); 2748 tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn)); 2749 tcg_gen_addi_ptr(zm, tcg_env, vec_full_reg_offset(s, a->rm)); 2750 tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg)); 2751 2752 gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0))); 2753 2754 do_pred_flags(t); 2755 return true; 2756 } 2757 2758 #define DO_PPZZ(NAME, name) \ 2759 static gen_helper_gvec_flags_4 * const name##_ppzz_fns[4] = { \ 2760 gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \ 2761 gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \ 2762 }; \ 2763 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_ppzz_flags, \ 2764 a, name##_ppzz_fns[a->esz]) 2765 2766 DO_PPZZ(CMPEQ, cmpeq) 2767 DO_PPZZ(CMPNE, cmpne) 2768 DO_PPZZ(CMPGT, cmpgt) 2769 DO_PPZZ(CMPGE, cmpge) 2770 DO_PPZZ(CMPHI, cmphi) 2771 DO_PPZZ(CMPHS, cmphs) 2772 2773 #undef DO_PPZZ 2774 2775 #define DO_PPZW(NAME, name) \ 2776 static gen_helper_gvec_flags_4 * const name##_ppzw_fns[4] = { \ 2777 
gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \ 2778 gen_helper_sve_##name##_ppzw_s, NULL \ 2779 }; \ 2780 TRANS_FEAT(NAME##_ppzw, aa64_sve, do_ppzz_flags, \ 2781 a, name##_ppzw_fns[a->esz]) 2782 2783 DO_PPZW(CMPEQ, cmpeq) 2784 DO_PPZW(CMPNE, cmpne) 2785 DO_PPZW(CMPGT, cmpgt) 2786 DO_PPZW(CMPGE, cmpge) 2787 DO_PPZW(CMPHI, cmphi) 2788 DO_PPZW(CMPHS, cmphs) 2789 DO_PPZW(CMPLT, cmplt) 2790 DO_PPZW(CMPLE, cmple) 2791 DO_PPZW(CMPLO, cmplo) 2792 DO_PPZW(CMPLS, cmpls) 2793 2794 #undef DO_PPZW 2795 2796 /* 2797 *** SVE Integer Compare - Immediate Groups 2798 */ 2799 2800 static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a, 2801 gen_helper_gvec_flags_3 *gen_fn) 2802 { 2803 TCGv_ptr pd, zn, pg; 2804 unsigned vsz; 2805 TCGv_i32 t; 2806 2807 if (gen_fn == NULL) { 2808 return false; 2809 } 2810 if (!sve_access_check(s)) { 2811 return true; 2812 } 2813 2814 vsz = vec_full_reg_size(s); 2815 t = tcg_temp_new_i32(); 2816 pd = tcg_temp_new_ptr(); 2817 zn = tcg_temp_new_ptr(); 2818 pg = tcg_temp_new_ptr(); 2819 2820 tcg_gen_addi_ptr(pd, tcg_env, pred_full_reg_offset(s, a->rd)); 2821 tcg_gen_addi_ptr(zn, tcg_env, vec_full_reg_offset(s, a->rn)); 2822 tcg_gen_addi_ptr(pg, tcg_env, pred_full_reg_offset(s, a->pg)); 2823 2824 gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm))); 2825 2826 do_pred_flags(t); 2827 return true; 2828 } 2829 2830 #define DO_PPZI(NAME, name) \ 2831 static gen_helper_gvec_flags_3 * const name##_ppzi_fns[4] = { \ 2832 gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \ 2833 gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \ 2834 }; \ 2835 TRANS_FEAT(NAME##_ppzi, aa64_sve, do_ppzi_flags, a, \ 2836 name##_ppzi_fns[a->esz]) 2837 2838 DO_PPZI(CMPEQ, cmpeq) 2839 DO_PPZI(CMPNE, cmpne) 2840 DO_PPZI(CMPGT, cmpgt) 2841 DO_PPZI(CMPGE, cmpge) 2842 DO_PPZI(CMPHI, cmphi) 2843 DO_PPZI(CMPHS, cmphs) 2844 DO_PPZI(CMPLT, cmplt) 2845 DO_PPZI(CMPLE, cmple) 2846 DO_PPZI(CMPLO, cmplo) 2847 DO_PPZI(CMPLS, cmpls) 2848 2849 #undef DO_PPZI 2850 2851 /* 2852 *** SVE Partition Break Group 2853 */ 2854 2855 static bool do_brk3(DisasContext *s, arg_rprr_s *a, 2856 gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s) 2857 { 2858 if (!sve_access_check(s)) { 2859 return true; 2860 } 2861 2862 unsigned vsz = pred_full_reg_size(s); 2863 2864 /* Predicate sizes may be smaller and cannot use simd_desc. */ 2865 TCGv_ptr d = tcg_temp_new_ptr(); 2866 TCGv_ptr n = tcg_temp_new_ptr(); 2867 TCGv_ptr m = tcg_temp_new_ptr(); 2868 TCGv_ptr g = tcg_temp_new_ptr(); 2869 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); 2870 2871 tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd)); 2872 tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn)); 2873 tcg_gen_addi_ptr(m, tcg_env, pred_full_reg_offset(s, a->rm)); 2874 tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg)); 2875 2876 if (a->s) { 2877 TCGv_i32 t = tcg_temp_new_i32(); 2878 fn_s(t, d, n, m, g, desc); 2879 do_pred_flags(t); 2880 } else { 2881 fn(d, n, m, g, desc); 2882 } 2883 return true; 2884 } 2885 2886 static bool do_brk2(DisasContext *s, arg_rpr_s *a, 2887 gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s) 2888 { 2889 if (!sve_access_check(s)) { 2890 return true; 2891 } 2892 2893 unsigned vsz = pred_full_reg_size(s); 2894 2895 /* Predicate sizes may be smaller and cannot use simd_desc. 
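 * Note that only PREDDESC.OPRSZ is encoded here; the BRK helpers work
 * on the predicate at byte granularity and have no use for an element
 * size field.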
*/ 2896 TCGv_ptr d = tcg_temp_new_ptr(); 2897 TCGv_ptr n = tcg_temp_new_ptr(); 2898 TCGv_ptr g = tcg_temp_new_ptr(); 2899 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz)); 2900 2901 tcg_gen_addi_ptr(d, tcg_env, pred_full_reg_offset(s, a->rd)); 2902 tcg_gen_addi_ptr(n, tcg_env, pred_full_reg_offset(s, a->rn)); 2903 tcg_gen_addi_ptr(g, tcg_env, pred_full_reg_offset(s, a->pg)); 2904 2905 if (a->s) { 2906 TCGv_i32 t = tcg_temp_new_i32(); 2907 fn_s(t, d, n, g, desc); 2908 do_pred_flags(t); 2909 } else { 2910 fn(d, n, g, desc); 2911 } 2912 return true; 2913 } 2914 2915 TRANS_FEAT(BRKPA, aa64_sve, do_brk3, a, 2916 gen_helper_sve_brkpa, gen_helper_sve_brkpas) 2917 TRANS_FEAT(BRKPB, aa64_sve, do_brk3, a, 2918 gen_helper_sve_brkpb, gen_helper_sve_brkpbs) 2919 2920 TRANS_FEAT(BRKA_m, aa64_sve, do_brk2, a, 2921 gen_helper_sve_brka_m, gen_helper_sve_brkas_m) 2922 TRANS_FEAT(BRKB_m, aa64_sve, do_brk2, a, 2923 gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m) 2924 2925 TRANS_FEAT(BRKA_z, aa64_sve, do_brk2, a, 2926 gen_helper_sve_brka_z, gen_helper_sve_brkas_z) 2927 TRANS_FEAT(BRKB_z, aa64_sve, do_brk2, a, 2928 gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z) 2929 2930 TRANS_FEAT(BRKN, aa64_sve, do_brk2, a, 2931 gen_helper_sve_brkn, gen_helper_sve_brkns) 2932 2933 /* 2934 *** SVE Predicate Count Group 2935 */ 2936 2937 static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg) 2938 { 2939 unsigned psz = pred_full_reg_size(s); 2940 2941 if (psz <= 8) { 2942 uint64_t psz_mask; 2943 2944 tcg_gen_ld_i64(val, tcg_env, pred_full_reg_offset(s, pn)); 2945 if (pn != pg) { 2946 TCGv_i64 g = tcg_temp_new_i64(); 2947 tcg_gen_ld_i64(g, tcg_env, pred_full_reg_offset(s, pg)); 2948 tcg_gen_and_i64(val, val, g); 2949 } 2950 2951 /* Reduce the pred_esz_masks value simply to reduce the 2952 * size of the code generated here. 2953 */ 2954 psz_mask = MAKE_64BIT_MASK(0, psz * 8); 2955 tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask); 2956 2957 tcg_gen_ctpop_i64(val, val); 2958 } else { 2959 TCGv_ptr t_pn = tcg_temp_new_ptr(); 2960 TCGv_ptr t_pg = tcg_temp_new_ptr(); 2961 unsigned desc = 0; 2962 2963 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz); 2964 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz); 2965 2966 tcg_gen_addi_ptr(t_pn, tcg_env, pred_full_reg_offset(s, pn)); 2967 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 2968 2969 gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc)); 2970 } 2971 } 2972 2973 static bool trans_CNTP(DisasContext *s, arg_CNTP *a) 2974 { 2975 if (!dc_isar_feature(aa64_sve, s)) { 2976 return false; 2977 } 2978 if (sve_access_check(s)) { 2979 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg); 2980 } 2981 return true; 2982 } 2983 2984 static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a) 2985 { 2986 if (!dc_isar_feature(aa64_sve, s)) { 2987 return false; 2988 } 2989 if (sve_access_check(s)) { 2990 TCGv_i64 reg = cpu_reg(s, a->rd); 2991 TCGv_i64 val = tcg_temp_new_i64(); 2992 2993 do_cntp(s, val, a->esz, a->pg, a->pg); 2994 if (a->d) { 2995 tcg_gen_sub_i64(reg, reg, val); 2996 } else { 2997 tcg_gen_add_i64(reg, reg, val); 2998 } 2999 } 3000 return true; 3001 } 3002 3003 static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a) 3004 { 3005 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3006 return false; 3007 } 3008 if (sve_access_check(s)) { 3009 unsigned vsz = vec_full_reg_size(s); 3010 TCGv_i64 val = tcg_temp_new_i64(); 3011 GVecGen2sFn *gvec_fn = a->d ? 
tcg_gen_gvec_subs : tcg_gen_gvec_adds; 3012 3013 do_cntp(s, val, a->esz, a->pg, a->pg); 3014 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd), 3015 vec_full_reg_offset(s, a->rn), val, vsz, vsz); 3016 } 3017 return true; 3018 } 3019 3020 static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a) 3021 { 3022 if (!dc_isar_feature(aa64_sve, s)) { 3023 return false; 3024 } 3025 if (sve_access_check(s)) { 3026 TCGv_i64 reg = cpu_reg(s, a->rd); 3027 TCGv_i64 val = tcg_temp_new_i64(); 3028 3029 do_cntp(s, val, a->esz, a->pg, a->pg); 3030 do_sat_addsub_32(reg, val, a->u, a->d); 3031 } 3032 return true; 3033 } 3034 3035 static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a) 3036 { 3037 if (!dc_isar_feature(aa64_sve, s)) { 3038 return false; 3039 } 3040 if (sve_access_check(s)) { 3041 TCGv_i64 reg = cpu_reg(s, a->rd); 3042 TCGv_i64 val = tcg_temp_new_i64(); 3043 3044 do_cntp(s, val, a->esz, a->pg, a->pg); 3045 do_sat_addsub_64(reg, val, a->u, a->d); 3046 } 3047 return true; 3048 } 3049 3050 static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a) 3051 { 3052 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3053 return false; 3054 } 3055 if (sve_access_check(s)) { 3056 TCGv_i64 val = tcg_temp_new_i64(); 3057 do_cntp(s, val, a->esz, a->pg, a->pg); 3058 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d); 3059 } 3060 return true; 3061 } 3062 3063 /* 3064 *** SVE Integer Compare Scalars Group 3065 */ 3066 3067 static bool trans_CTERM(DisasContext *s, arg_CTERM *a) 3068 { 3069 if (!dc_isar_feature(aa64_sve, s)) { 3070 return false; 3071 } 3072 if (!sve_access_check(s)) { 3073 return true; 3074 } 3075 3076 TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ); 3077 TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf); 3078 TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf); 3079 TCGv_i64 cmp = tcg_temp_new_i64(); 3080 3081 tcg_gen_setcond_i64(cond, cmp, rn, rm); 3082 tcg_gen_extrl_i64_i32(cpu_NF, cmp); 3083 3084 /* VF = !NF & !CF. */ 3085 tcg_gen_xori_i32(cpu_VF, cpu_NF, 1); 3086 tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF); 3087 3088 /* Both NF and VF actually look at bit 31. */ 3089 tcg_gen_neg_i32(cpu_NF, cpu_NF); 3090 tcg_gen_neg_i32(cpu_VF, cpu_VF); 3091 return true; 3092 } 3093 3094 static bool trans_WHILE(DisasContext *s, arg_WHILE *a) 3095 { 3096 TCGv_i64 op0, op1, t0, t1, tmax; 3097 TCGv_i32 t2; 3098 TCGv_ptr ptr; 3099 unsigned vsz = vec_full_reg_size(s); 3100 unsigned desc = 0; 3101 TCGCond cond; 3102 uint64_t maxval; 3103 /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */ 3104 bool eq = a->eq == a->lt; 3105 3106 /* The greater-than conditions are all SVE2. */ 3107 if (a->lt 3108 ? !dc_isar_feature(aa64_sve, s) 3109 : !dc_isar_feature(aa64_sve2, s)) { 3110 return false; 3111 } 3112 if (!sve_access_check(s)) { 3113 return true; 3114 } 3115 3116 op0 = read_cpu_reg(s, a->rn, 1); 3117 op1 = read_cpu_reg(s, a->rm, 1); 3118 3119 if (!a->sf) { 3120 if (a->u) { 3121 tcg_gen_ext32u_i64(op0, op0); 3122 tcg_gen_ext32u_i64(op1, op1); 3123 } else { 3124 tcg_gen_ext32s_i64(op0, op0); 3125 tcg_gen_ext32s_i64(op1, op1); 3126 } 3127 } 3128 3129 /* For the helper, compress the different conditions into a computation 3130 * of how many iterations for which the condition is true. 3131 */ 3132 t0 = tcg_temp_new_i64(); 3133 t1 = tcg_temp_new_i64(); 3134 3135 if (a->lt) { 3136 tcg_gen_sub_i64(t0, op1, op0); 3137 if (a->u) { 3138 maxval = a->sf ? UINT64_MAX : UINT32_MAX; 3139 cond = eq ? TCG_COND_LEU : TCG_COND_LTU; 3140 } else { 3141 maxval = a->sf ? 
INT64_MAX : INT32_MAX; 3142 cond = eq ? TCG_COND_LE : TCG_COND_LT; 3143 } 3144 } else { 3145 tcg_gen_sub_i64(t0, op0, op1); 3146 if (a->u) { 3147 maxval = 0; 3148 cond = eq ? TCG_COND_GEU : TCG_COND_GTU; 3149 } else { 3150 maxval = a->sf ? INT64_MIN : INT32_MIN; 3151 cond = eq ? TCG_COND_GE : TCG_COND_GT; 3152 } 3153 } 3154 3155 tmax = tcg_constant_i64(vsz >> a->esz); 3156 if (eq) { 3157 /* Equality means one more iteration. */ 3158 tcg_gen_addi_i64(t0, t0, 1); 3159 3160 /* 3161 * For the less-than while, if op1 is maxval (and the only time 3162 * the addition above could overflow), then we produce an all-true 3163 * predicate by setting the count to the vector length. This is 3164 * because the pseudocode is described as an increment + compare 3165 * loop, and the maximum integer would always compare true. 3166 * Similarly, the greater-than while has the same issue with the 3167 * minimum integer due to the decrement + compare loop. 3168 */ 3169 tcg_gen_movi_i64(t1, maxval); 3170 tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0); 3171 } 3172 3173 /* Bound to the maximum. */ 3174 tcg_gen_umin_i64(t0, t0, tmax); 3175 3176 /* Set the count to zero if the condition is false. */ 3177 tcg_gen_movi_i64(t1, 0); 3178 tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1); 3179 3180 /* Since we're bounded, pass as a 32-bit type. */ 3181 t2 = tcg_temp_new_i32(); 3182 tcg_gen_extrl_i64_i32(t2, t0); 3183 3184 /* Scale elements to bits. */ 3185 tcg_gen_shli_i32(t2, t2, a->esz); 3186 3187 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); 3188 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 3189 3190 ptr = tcg_temp_new_ptr(); 3191 tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd)); 3192 3193 if (a->lt) { 3194 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); 3195 } else { 3196 gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc)); 3197 } 3198 do_pred_flags(t2); 3199 return true; 3200 } 3201 3202 static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a) 3203 { 3204 TCGv_i64 op0, op1, diff, t1, tmax; 3205 TCGv_i32 t2; 3206 TCGv_ptr ptr; 3207 unsigned vsz = vec_full_reg_size(s); 3208 unsigned desc = 0; 3209 3210 if (!dc_isar_feature(aa64_sve2, s)) { 3211 return false; 3212 } 3213 if (!sve_access_check(s)) { 3214 return true; 3215 } 3216 3217 op0 = read_cpu_reg(s, a->rn, 1); 3218 op1 = read_cpu_reg(s, a->rm, 1); 3219 3220 tmax = tcg_constant_i64(vsz); 3221 diff = tcg_temp_new_i64(); 3222 3223 if (a->rw) { 3224 /* WHILERW */ 3225 /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */ 3226 t1 = tcg_temp_new_i64(); 3227 tcg_gen_sub_i64(diff, op0, op1); 3228 tcg_gen_sub_i64(t1, op1, op0); 3229 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1); 3230 /* Round down to a multiple of ESIZE. */ 3231 tcg_gen_andi_i64(diff, diff, -1 << a->esz); 3232 /* If op1 == op0, diff == 0, and the condition is always true. */ 3233 tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff); 3234 } else { 3235 /* WHILEWR */ 3236 tcg_gen_sub_i64(diff, op1, op0); 3237 /* Round down to a multiple of ESIZE. */ 3238 tcg_gen_andi_i64(diff, diff, -1 << a->esz); 3239 /* If op0 >= op1, diff <= 0, the condition is always true. */ 3240 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff); 3241 } 3242 3243 /* Bound to the maximum. */ 3244 tcg_gen_umin_i64(diff, diff, tmax); 3245 3246 /* Since we're bounded, pass as a 32-bit type. 
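 * (diff was clamped to at most vsz just above, so it always fits; the
 * same sve_whilel helper as in trans_WHILE() then expands the count
 * into the result predicate.)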
*/ 3247 t2 = tcg_temp_new_i32(); 3248 tcg_gen_extrl_i64_i32(t2, diff); 3249 3250 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8); 3251 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz); 3252 3253 ptr = tcg_temp_new_ptr(); 3254 tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd)); 3255 3256 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc)); 3257 do_pred_flags(t2); 3258 return true; 3259 } 3260 3261 /* 3262 *** SVE Integer Wide Immediate - Unpredicated Group 3263 */ 3264 3265 static bool trans_FDUP(DisasContext *s, arg_FDUP *a) 3266 { 3267 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3268 return false; 3269 } 3270 if (sve_access_check(s)) { 3271 unsigned vsz = vec_full_reg_size(s); 3272 int dofs = vec_full_reg_offset(s, a->rd); 3273 uint64_t imm; 3274 3275 /* Decode the VFP immediate. */ 3276 imm = vfp_expand_imm(a->esz, a->imm); 3277 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm); 3278 } 3279 return true; 3280 } 3281 3282 static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a) 3283 { 3284 if (!dc_isar_feature(aa64_sve, s)) { 3285 return false; 3286 } 3287 if (sve_access_check(s)) { 3288 unsigned vsz = vec_full_reg_size(s); 3289 int dofs = vec_full_reg_offset(s, a->rd); 3290 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm); 3291 } 3292 return true; 3293 } 3294 3295 TRANS_FEAT(ADD_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_addi, a) 3296 3297 static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a) 3298 { 3299 a->imm = -a->imm; 3300 return trans_ADD_zzi(s, a); 3301 } 3302 3303 static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a) 3304 { 3305 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 }; 3306 static const GVecGen2s op[4] = { 3307 { .fni8 = tcg_gen_vec_sub8_i64, 3308 .fniv = tcg_gen_sub_vec, 3309 .fno = gen_helper_sve_subri_b, 3310 .opt_opc = vecop_list, 3311 .vece = MO_8, 3312 .scalar_first = true }, 3313 { .fni8 = tcg_gen_vec_sub16_i64, 3314 .fniv = tcg_gen_sub_vec, 3315 .fno = gen_helper_sve_subri_h, 3316 .opt_opc = vecop_list, 3317 .vece = MO_16, 3318 .scalar_first = true }, 3319 { .fni4 = tcg_gen_sub_i32, 3320 .fniv = tcg_gen_sub_vec, 3321 .fno = gen_helper_sve_subri_s, 3322 .opt_opc = vecop_list, 3323 .vece = MO_32, 3324 .scalar_first = true }, 3325 { .fni8 = tcg_gen_sub_i64, 3326 .fniv = tcg_gen_sub_vec, 3327 .fno = gen_helper_sve_subri_d, 3328 .opt_opc = vecop_list, 3329 .prefer_i64 = TCG_TARGET_REG_BITS == 64, 3330 .vece = MO_64, 3331 .scalar_first = true } 3332 }; 3333 3334 if (!dc_isar_feature(aa64_sve, s)) { 3335 return false; 3336 } 3337 if (sve_access_check(s)) { 3338 unsigned vsz = vec_full_reg_size(s); 3339 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd), 3340 vec_full_reg_offset(s, a->rn), 3341 vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]); 3342 } 3343 return true; 3344 } 3345 3346 TRANS_FEAT(MUL_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_muli, a) 3347 3348 static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d) 3349 { 3350 if (sve_access_check(s)) { 3351 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, 3352 tcg_constant_i64(a->imm), u, d); 3353 } 3354 return true; 3355 } 3356 3357 TRANS_FEAT(SQADD_zzi, aa64_sve, do_zzi_sat, a, false, false) 3358 TRANS_FEAT(UQADD_zzi, aa64_sve, do_zzi_sat, a, true, false) 3359 TRANS_FEAT(SQSUB_zzi, aa64_sve, do_zzi_sat, a, false, true) 3360 TRANS_FEAT(UQSUB_zzi, aa64_sve, do_zzi_sat, a, true, true) 3361 3362 static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn) 3363 { 3364 if (sve_access_check(s)) { 3365 unsigned vsz = vec_full_reg_size(s); 
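        /* The immediate is passed to the helper at run time as an i64
         * operand rather than being folded into simd_data().  */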
3366 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd), 3367 vec_full_reg_offset(s, a->rn), 3368 tcg_constant_i64(a->imm), vsz, vsz, 0, fn); 3369 } 3370 return true; 3371 } 3372 3373 #define DO_ZZI(NAME, name) \ 3374 static gen_helper_gvec_2i * const name##i_fns[4] = { \ 3375 gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \ 3376 gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \ 3377 }; \ 3378 TRANS_FEAT(NAME##_zzi, aa64_sve, do_zzi_ool, a, name##i_fns[a->esz]) 3379 3380 DO_ZZI(SMAX, smax) 3381 DO_ZZI(UMAX, umax) 3382 DO_ZZI(SMIN, smin) 3383 DO_ZZI(UMIN, umin) 3384 3385 #undef DO_ZZI 3386 3387 static gen_helper_gvec_4 * const dot_fns[2][2] = { 3388 { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h }, 3389 { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h } 3390 }; 3391 TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz, 3392 dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0) 3393 3394 /* 3395 * SVE Multiply - Indexed 3396 */ 3397 3398 TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, 3399 gen_helper_gvec_sdot_idx_b, a) 3400 TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, 3401 gen_helper_gvec_sdot_idx_h, a) 3402 TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz, 3403 gen_helper_gvec_udot_idx_b, a) 3404 TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz, 3405 gen_helper_gvec_udot_idx_h, a) 3406 3407 TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, 3408 gen_helper_gvec_sudot_idx_b, a) 3409 TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz, 3410 gen_helper_gvec_usdot_idx_b, a) 3411 3412 #define DO_SVE2_RRX(NAME, FUNC) \ 3413 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ 3414 a->rd, a->rn, a->rm, a->index) 3415 3416 DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h) 3417 DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s) 3418 DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d) 3419 3420 DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h) 3421 DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s) 3422 DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d) 3423 3424 DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h) 3425 DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s) 3426 DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d) 3427 3428 #undef DO_SVE2_RRX 3429 3430 #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \ 3431 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \ 3432 a->rd, a->rn, a->rm, (a->index << 1) | TOP) 3433 3434 DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false) 3435 DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false) 3436 DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true) 3437 DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true) 3438 3439 DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false) 3440 DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false) 3441 DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true) 3442 DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true) 3443 3444 DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false) 3445 DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false) 3446 DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true) 3447 DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true) 3448 3449 #undef DO_SVE2_RRX_TB 3450 3451 #define DO_SVE2_RRXR(NAME, FUNC) \ 3452 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a) 3453 3454 DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h) 3455 DO_SVE2_RRXR(MLA_zzxz_s, 
gen_helper_gvec_mla_idx_s) 3456 DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d) 3457 3458 DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h) 3459 DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s) 3460 DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d) 3461 3462 DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h) 3463 DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s) 3464 DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d) 3465 3466 DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h) 3467 DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s) 3468 DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d) 3469 3470 #undef DO_SVE2_RRXR 3471 3472 #define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \ 3473 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ 3474 a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP) 3475 3476 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false) 3477 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false) 3478 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true) 3479 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true) 3480 3481 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false) 3482 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false) 3483 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true) 3484 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true) 3485 3486 DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false) 3487 DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false) 3488 DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true) 3489 DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true) 3490 3491 DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false) 3492 DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false) 3493 DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true) 3494 DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true) 3495 3496 DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false) 3497 DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false) 3498 DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true) 3499 DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true) 3500 3501 DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false) 3502 DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false) 3503 DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true) 3504 DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true) 3505 3506 #undef DO_SVE2_RRXR_TB 3507 3508 #define DO_SVE2_RRXR_ROT(NAME, FUNC) \ 3509 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \ 3510 a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot) 3511 3512 DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h) 3513 DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s) 3514 3515 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h) 3516 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s) 3517 3518 DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s) 3519 DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) 3520 3521 #undef DO_SVE2_RRXR_ROT 3522 3523 /* 3524 *** SVE Floating Point Multiply-Add Indexed Group 3525 */ 3526 3527 static gen_helper_gvec_4_ptr * const fmla_idx_fns[4] = { 3528 NULL, gen_helper_gvec_fmla_idx_h, 3529 gen_helper_gvec_fmla_idx_s, gen_helper_gvec_fmla_idx_d 3530 }; 3531 
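/*
 * Half-precision operations use the separate FP16 status flavour so
 * that flush-to-zero for FP16 (FPCR.FZ16) is applied independently of
 * FPCR.FZ for the larger element sizes.
 */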
TRANS_FEAT(FMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, 3532 fmla_idx_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->index, 3533 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3534 3535 static gen_helper_gvec_4_ptr * const fmls_idx_fns[4][2] = { 3536 { NULL, NULL }, 3537 { gen_helper_gvec_fmls_idx_h, gen_helper_gvec_ah_fmls_idx_h }, 3538 { gen_helper_gvec_fmls_idx_s, gen_helper_gvec_ah_fmls_idx_s }, 3539 { gen_helper_gvec_fmls_idx_d, gen_helper_gvec_ah_fmls_idx_d }, 3540 }; 3541 TRANS_FEAT(FMLS_zzxz, aa64_sve, gen_gvec_fpst_zzzz, 3542 fmls_idx_fns[a->esz][s->fpcr_ah], 3543 a->rd, a->rn, a->rm, a->ra, a->index, 3544 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3545 3546 /* 3547 *** SVE Floating Point Multiply Indexed Group 3548 */ 3549 3550 static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = { 3551 NULL, gen_helper_gvec_fmul_idx_h, 3552 gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d, 3553 }; 3554 TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz, 3555 fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index, 3556 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3557 3558 /* 3559 *** SVE Floating Point Fast Reduction Group 3560 */ 3561 3562 typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr, 3563 TCGv_ptr, TCGv_i32); 3564 3565 static bool do_reduce(DisasContext *s, arg_rpr_esz *a, 3566 gen_helper_fp_reduce *fn) 3567 { 3568 unsigned vsz, p2vsz; 3569 TCGv_i32 t_desc; 3570 TCGv_ptr t_zn, t_pg, status; 3571 TCGv_i64 temp; 3572 3573 if (fn == NULL) { 3574 return false; 3575 } 3576 if (!sve_access_check(s)) { 3577 return true; 3578 } 3579 3580 vsz = vec_full_reg_size(s); 3581 p2vsz = pow2ceil(vsz); 3582 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz)); 3583 temp = tcg_temp_new_i64(); 3584 t_zn = tcg_temp_new_ptr(); 3585 t_pg = tcg_temp_new_ptr(); 3586 3587 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, a->rn)); 3588 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); 3589 status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3590 3591 fn(temp, t_zn, t_pg, status, t_desc); 3592 3593 write_fp_dreg(s, a->rd, temp); 3594 return true; 3595 } 3596 3597 #define DO_VPZ(NAME, name) \ 3598 static gen_helper_fp_reduce * const name##_fns[4] = { \ 3599 NULL, gen_helper_sve_##name##_h, \ 3600 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 3601 }; \ 3602 TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz]) 3603 3604 #define DO_VPZ_AH(NAME, name) \ 3605 static gen_helper_fp_reduce * const name##_fns[4] = { \ 3606 NULL, gen_helper_sve_##name##_h, \ 3607 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 3608 }; \ 3609 static gen_helper_fp_reduce * const name##_ah_fns[4] = { \ 3610 NULL, gen_helper_sve_ah_##name##_h, \ 3611 gen_helper_sve_ah_##name##_s, gen_helper_sve_ah_##name##_d, \ 3612 }; \ 3613 TRANS_FEAT(NAME, aa64_sve, do_reduce, a, \ 3614 s->fpcr_ah ? 
name##_ah_fns[a->esz] : name##_fns[a->esz]) 3615 3616 DO_VPZ(FADDV, faddv) 3617 DO_VPZ(FMINNMV, fminnmv) 3618 DO_VPZ(FMAXNMV, fmaxnmv) 3619 DO_VPZ_AH(FMINV, fminv) 3620 DO_VPZ_AH(FMAXV, fmaxv) 3621 3622 #undef DO_VPZ 3623 3624 /* 3625 *** SVE Floating Point Unary Operations - Unpredicated Group 3626 */ 3627 3628 static gen_helper_gvec_2_ptr * const frecpe_fns[] = { 3629 NULL, gen_helper_gvec_frecpe_h, 3630 gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d, 3631 }; 3632 TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_ah_arg_zz, frecpe_fns[a->esz], a, 0) 3633 3634 static gen_helper_gvec_2_ptr * const frsqrte_fns[] = { 3635 NULL, gen_helper_gvec_frsqrte_h, 3636 gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d, 3637 }; 3638 TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_ah_arg_zz, frsqrte_fns[a->esz], a, 0) 3639 3640 /* 3641 *** SVE Floating Point Compare with Zero Group 3642 */ 3643 3644 static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a, 3645 gen_helper_gvec_3_ptr *fn) 3646 { 3647 if (fn == NULL) { 3648 return false; 3649 } 3650 if (sve_access_check(s)) { 3651 unsigned vsz = vec_full_reg_size(s); 3652 TCGv_ptr status = 3653 fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3654 3655 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd), 3656 vec_full_reg_offset(s, a->rn), 3657 pred_full_reg_offset(s, a->pg), 3658 status, vsz, vsz, 0, fn); 3659 } 3660 return true; 3661 } 3662 3663 #define DO_PPZ(NAME, name) \ 3664 static gen_helper_gvec_3_ptr * const name##_fns[] = { \ 3665 NULL, gen_helper_sve_##name##_h, \ 3666 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \ 3667 }; \ 3668 TRANS_FEAT(NAME, aa64_sve, do_ppz_fp, a, name##_fns[a->esz]) 3669 3670 DO_PPZ(FCMGE_ppz0, fcmge0) 3671 DO_PPZ(FCMGT_ppz0, fcmgt0) 3672 DO_PPZ(FCMLE_ppz0, fcmle0) 3673 DO_PPZ(FCMLT_ppz0, fcmlt0) 3674 DO_PPZ(FCMEQ_ppz0, fcmeq0) 3675 DO_PPZ(FCMNE_ppz0, fcmne0) 3676 3677 #undef DO_PPZ 3678 3679 /* 3680 *** SVE floating-point trig multiply-add coefficient 3681 */ 3682 3683 static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { 3684 NULL, gen_helper_sve_ftmad_h, 3685 gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, 3686 }; 3687 TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, 3688 ftmad_fns[a->esz], a->rd, a->rn, a->rm, 3689 a->imm | (s->fpcr_ah << 3), 3690 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3691 3692 /* 3693 *** SVE Floating Point Accumulating Reduction Group 3694 */ 3695 3696 static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) 3697 { 3698 typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr, 3699 TCGv_ptr, TCGv_ptr, TCGv_i32); 3700 static fadda_fn * const fns[3] = { 3701 gen_helper_sve_fadda_h, 3702 gen_helper_sve_fadda_s, 3703 gen_helper_sve_fadda_d, 3704 }; 3705 unsigned vsz = vec_full_reg_size(s); 3706 TCGv_ptr t_rm, t_pg, t_fpst; 3707 TCGv_i64 t_val; 3708 TCGv_i32 t_desc; 3709 3710 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { 3711 return false; 3712 } 3713 s->is_nonstreaming = true; 3714 if (!sve_access_check(s)) { 3715 return true; 3716 } 3717 3718 t_val = load_esz(tcg_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz); 3719 t_rm = tcg_temp_new_ptr(); 3720 t_pg = tcg_temp_new_ptr(); 3721 tcg_gen_addi_ptr(t_rm, tcg_env, vec_full_reg_offset(s, a->rm)); 3722 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, a->pg)); 3723 t_fpst = fpstatus_ptr(a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); 3724 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 3725 3726 fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc); 3727 3728 write_fp_dreg(s, a->rd, t_val); 3729 return true; 3730 } 3731 3732 /* 3733 *** SVE Floating Point Arithmetic - Unpredicated Group 3734 */ 3735 3736 #define DO_FP3(NAME, name) \ 3737 static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ 3738 NULL, gen_helper_gvec_##name##_h, \ 3739 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ 3740 }; \ 3741 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0) 3742 3743 #define DO_FP3_AH(NAME, name) \ 3744 static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ 3745 NULL, gen_helper_gvec_##name##_h, \ 3746 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ 3747 }; \ 3748 static gen_helper_gvec_3_ptr * const name##_ah_fns[4] = { \ 3749 NULL, gen_helper_gvec_ah_##name##_h, \ 3750 gen_helper_gvec_ah_##name##_s, gen_helper_gvec_ah_##name##_d \ 3751 }; \ 3752 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_ah_arg_zzz, \ 3753 s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], a, 0) 3754 3755 DO_FP3(FADD_zzz, fadd) 3756 DO_FP3(FSUB_zzz, fsub) 3757 DO_FP3(FMUL_zzz, fmul) 3758 DO_FP3_AH(FRECPS, recps) 3759 DO_FP3_AH(FRSQRTS, rsqrts) 3760 3761 #undef DO_FP3 3762 3763 static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = { 3764 NULL, gen_helper_gvec_ftsmul_h, 3765 gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d 3766 }; 3767 TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, 3768 ftsmul_fns[a->esz], a, 0) 3769 3770 /* 3771 *** SVE Floating Point Arithmetic - Predicated Group 3772 */ 3773 3774 #define DO_ZPZZ_FP(NAME, FEAT, name) \ 3775 static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ 3776 NULL, gen_helper_##name##_h, \ 3777 gen_helper_##name##_s, gen_helper_##name##_d \ 3778 }; \ 3779 TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) 3780 3781 #define DO_ZPZZ_AH_FP(NAME, FEAT, name, ah_name) \ 3782 static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ 3783 NULL, gen_helper_##name##_h, \ 3784 gen_helper_##name##_s, gen_helper_##name##_d \ 3785 }; \ 3786 static gen_helper_gvec_4_ptr * const name##_ah_zpzz_fns[4] = { \ 3787 NULL, gen_helper_##ah_name##_h, \ 3788 gen_helper_##ah_name##_s, gen_helper_##ah_name##_d \ 3789 }; \ 3790 TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, \ 3791 s->fpcr_ah ? 
name##_ah_zpzz_fns[a->esz] : \ 3792 name##_zpzz_fns[a->esz], a) 3793 3794 DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd) 3795 DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub) 3796 DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul) 3797 DO_ZPZZ_AH_FP(FMIN_zpzz, aa64_sve, sve_fmin, sve_ah_fmin) 3798 DO_ZPZZ_AH_FP(FMAX_zpzz, aa64_sve, sve_fmax, sve_ah_fmax) 3799 DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) 3800 DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) 3801 DO_ZPZZ_AH_FP(FABD, aa64_sve, sve_fabd, sve_ah_fabd) 3802 DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn) 3803 DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv) 3804 DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx) 3805 3806 typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr, 3807 TCGv_i64, TCGv_ptr, TCGv_i32); 3808 3809 static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16, 3810 TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn) 3811 { 3812 unsigned vsz = vec_full_reg_size(s); 3813 TCGv_ptr t_zd, t_zn, t_pg, status; 3814 TCGv_i32 desc; 3815 3816 t_zd = tcg_temp_new_ptr(); 3817 t_zn = tcg_temp_new_ptr(); 3818 t_pg = tcg_temp_new_ptr(); 3819 tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd)); 3820 tcg_gen_addi_ptr(t_zn, tcg_env, vec_full_reg_offset(s, zn)); 3821 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 3822 3823 status = fpstatus_ptr(is_fp16 ? FPST_A64_F16 : FPST_A64); 3824 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0)); 3825 fn(t_zd, t_zn, t_pg, scalar, status, desc); 3826 } 3827 3828 static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm, 3829 gen_helper_sve_fp2scalar *fn) 3830 { 3831 if (fn == NULL) { 3832 return false; 3833 } 3834 if (sve_access_check(s)) { 3835 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, 3836 tcg_constant_i64(imm), fn); 3837 } 3838 return true; 3839 } 3840 3841 #define DO_FP_IMM(NAME, name, const0, const1) \ 3842 static gen_helper_sve_fp2scalar * const name##_fns[4] = { \ 3843 NULL, gen_helper_sve_##name##_h, \ 3844 gen_helper_sve_##name##_s, \ 3845 gen_helper_sve_##name##_d \ 3846 }; \ 3847 static uint64_t const name##_const[4][2] = { \ 3848 { -1, -1 }, \ 3849 { float16_##const0, float16_##const1 }, \ 3850 { float32_##const0, float32_##const1 }, \ 3851 { float64_##const0, float64_##const1 }, \ 3852 }; \ 3853 TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ 3854 name##_const[a->esz][a->imm], name##_fns[a->esz]) 3855 3856 #define DO_FP_AH_IMM(NAME, name, const0, const1) \ 3857 static gen_helper_sve_fp2scalar * const name##_fns[4] = { \ 3858 NULL, gen_helper_sve_##name##_h, \ 3859 gen_helper_sve_##name##_s, \ 3860 gen_helper_sve_##name##_d \ 3861 }; \ 3862 static gen_helper_sve_fp2scalar * const name##_ah_fns[4] = { \ 3863 NULL, gen_helper_sve_ah_##name##_h, \ 3864 gen_helper_sve_ah_##name##_s, \ 3865 gen_helper_sve_ah_##name##_d \ 3866 }; \ 3867 static uint64_t const name##_const[4][2] = { \ 3868 { -1, -1 }, \ 3869 { float16_##const0, float16_##const1 }, \ 3870 { float32_##const0, float32_##const1 }, \ 3871 { float64_##const0, float64_##const1 }, \ 3872 }; \ 3873 TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \ 3874 name##_const[a->esz][a->imm], \ 3875 s->fpcr_ah ? 
name##_ah_fns[a->esz] : name##_fns[a->esz]) 3876 3877 DO_FP_IMM(FADD, fadds, half, one) 3878 DO_FP_IMM(FSUB, fsubs, half, one) 3879 DO_FP_IMM(FMUL, fmuls, half, two) 3880 DO_FP_IMM(FSUBR, fsubrs, half, one) 3881 DO_FP_IMM(FMAXNM, fmaxnms, zero, one) 3882 DO_FP_IMM(FMINNM, fminnms, zero, one) 3883 DO_FP_AH_IMM(FMAX, fmaxs, zero, one) 3884 DO_FP_AH_IMM(FMIN, fmins, zero, one) 3885 3886 #undef DO_FP_IMM 3887 3888 static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a, 3889 gen_helper_gvec_4_ptr *fn) 3890 { 3891 if (fn == NULL) { 3892 return false; 3893 } 3894 if (sve_access_check(s)) { 3895 unsigned vsz = vec_full_reg_size(s); 3896 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 3897 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd), 3898 vec_full_reg_offset(s, a->rn), 3899 vec_full_reg_offset(s, a->rm), 3900 pred_full_reg_offset(s, a->pg), 3901 status, vsz, vsz, 0, fn); 3902 } 3903 return true; 3904 } 3905 3906 #define DO_FPCMP(NAME, name) \ 3907 static gen_helper_gvec_4_ptr * const name##_fns[4] = { \ 3908 NULL, gen_helper_sve_##name##_h, \ 3909 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ 3910 }; \ 3911 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_fp_cmp, a, name##_fns[a->esz]) 3912 3913 DO_FPCMP(FCMGE, fcmge) 3914 DO_FPCMP(FCMGT, fcmgt) 3915 DO_FPCMP(FCMEQ, fcmeq) 3916 DO_FPCMP(FCMNE, fcmne) 3917 DO_FPCMP(FCMUO, fcmuo) 3918 DO_FPCMP(FACGE, facge) 3919 DO_FPCMP(FACGT, facgt) 3920 3921 #undef DO_FPCMP 3922 3923 static gen_helper_gvec_4_ptr * const fcadd_fns[] = { 3924 NULL, gen_helper_sve_fcadd_h, 3925 gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d, 3926 }; 3927 TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], 3928 a->rd, a->rn, a->rm, a->pg, a->rot | (s->fpcr_ah << 1), 3929 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3930 3931 #define DO_FMLA(NAME, name, ah_name) \ 3932 static gen_helper_gvec_5_ptr * const name##_fns[4] = { \ 3933 NULL, gen_helper_sve_##name##_h, \ 3934 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ 3935 }; \ 3936 static gen_helper_gvec_5_ptr * const name##_ah_fns[4] = { \ 3937 NULL, gen_helper_sve_##ah_name##_h, \ 3938 gen_helper_sve_##ah_name##_s, gen_helper_sve_##ah_name##_d \ 3939 }; \ 3940 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, \ 3941 s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], \ 3942 a->rd, a->rn, a->rm, a->ra, a->pg, 0, \ 3943 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3944 3945 /* We don't need an ah_fmla_zpzzz because fmla doesn't negate anything */ 3946 DO_FMLA(FMLA_zpzzz, fmla_zpzzz, fmla_zpzzz) 3947 DO_FMLA(FMLS_zpzzz, fmls_zpzzz, ah_fmls_zpzzz) 3948 DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz, ah_fnmla_zpzzz) 3949 DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz, ah_fnmls_zpzzz) 3950 3951 #undef DO_FMLA 3952 3953 static gen_helper_gvec_5_ptr * const fcmla_fns[4] = { 3954 NULL, gen_helper_sve_fcmla_zpzzz_h, 3955 gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d, 3956 }; 3957 TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz], 3958 a->rd, a->rn, a->rm, a->ra, a->pg, a->rot | (s->fpcr_ah << 2), 3959 a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 3960 3961 static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = { 3962 NULL, gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, NULL 3963 }; 3964 TRANS_FEAT(FCMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, fcmla_idx_fns[a->esz], 3965 a->rd, a->rn, a->rm, a->ra, a->index * 4 + a->rot, 3966 a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64) 3967 3968 /* 3969 *** SVE Floating Point Unary Operations Predicated Group 3970 */ 3971 3972 TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 3973 gen_helper_sve_fcvt_sh, a, 0, FPST_A64) 3974 TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 3975 gen_helper_sve_fcvt_hs, a, 0, FPST_A64_F16) 3976 3977 TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, 3978 gen_helper_sve_bfcvt, a, 0, 3979 s->fpcr_ah ? FPST_AH : FPST_A64) 3980 3981 TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 3982 gen_helper_sve_fcvt_dh, a, 0, FPST_A64) 3983 TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 3984 gen_helper_sve_fcvt_hd, a, 0, FPST_A64_F16) 3985 TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 3986 gen_helper_sve_fcvt_ds, a, 0, FPST_A64) 3987 TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 3988 gen_helper_sve_fcvt_sd, a, 0, FPST_A64) 3989 3990 TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 3991 gen_helper_sve_fcvtzs_hh, a, 0, FPST_A64_F16) 3992 TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 3993 gen_helper_sve_fcvtzu_hh, a, 0, FPST_A64_F16) 3994 TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 3995 gen_helper_sve_fcvtzs_hs, a, 0, FPST_A64_F16) 3996 TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz, 3997 gen_helper_sve_fcvtzu_hs, a, 0, FPST_A64_F16) 3998 TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 3999 gen_helper_sve_fcvtzs_hd, a, 0, FPST_A64_F16) 4000 TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz, 4001 gen_helper_sve_fcvtzu_hd, a, 0, FPST_A64_F16) 4002 4003 TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4004 gen_helper_sve_fcvtzs_ss, a, 0, FPST_A64) 4005 TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4006 gen_helper_sve_fcvtzu_ss, a, 0, FPST_A64) 4007 TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4008 gen_helper_sve_fcvtzs_sd, a, 0, FPST_A64) 4009 TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4010 gen_helper_sve_fcvtzu_sd, a, 0, FPST_A64) 4011 TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4012 gen_helper_sve_fcvtzs_ds, a, 0, FPST_A64) 4013 TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4014 gen_helper_sve_fcvtzu_ds, a, 0, FPST_A64) 4015 4016 TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4017 gen_helper_sve_fcvtzs_dd, a, 0, FPST_A64) 4018 TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4019 gen_helper_sve_fcvtzu_dd, a, 0, FPST_A64) 4020 4021 static gen_helper_gvec_3_ptr * const frint_fns[] = { 4022 NULL, 4023 gen_helper_sve_frint_h, 4024 gen_helper_sve_frint_s, 4025 gen_helper_sve_frint_d 4026 }; 4027 TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz], 4028 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 4029 4030 static gen_helper_gvec_3_ptr * const frintx_fns[] = { 4031 NULL, 4032 gen_helper_sve_frintx_h, 4033 gen_helper_sve_frintx_s, 4034 gen_helper_sve_frintx_d 4035 }; 4036 TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz], 4037 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); 4038 4039 static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a, 4040 ARMFPRounding mode, gen_helper_gvec_3_ptr *fn) 4041 { 4042 unsigned vsz; 4043 TCGv_i32 tmode; 4044 TCGv_ptr status; 4045 4046 if (fn == NULL) { 4047 return false; 4048 } 4049 if (!sve_access_check(s)) { 4050 return true; 4051 } 4052 4053 vsz = vec_full_reg_size(s); 4054 status = fpstatus_ptr(a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64); 4055 tmode = gen_set_rmode(mode, status); 4056 4057 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), 4058 vec_full_reg_offset(s, a->rn), 4059 pred_full_reg_offset(s, a->pg), 4060 status, vsz, vsz, 0, fn); 4061 4062 gen_restore_rmode(tmode, status); 4063 return true; 4064 } 4065 4066 TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a, 4067 FPROUNDING_TIEEVEN, frint_fns[a->esz]) 4068 TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a, 4069 FPROUNDING_POSINF, frint_fns[a->esz]) 4070 TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a, 4071 FPROUNDING_NEGINF, frint_fns[a->esz]) 4072 TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a, 4073 FPROUNDING_ZERO, frint_fns[a->esz]) 4074 TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a, 4075 FPROUNDING_TIEAWAY, frint_fns[a->esz]) 4076 4077 static gen_helper_gvec_3_ptr * const frecpx_fns[] = { 4078 NULL, gen_helper_sve_frecpx_h, 4079 gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d, 4080 }; 4081 TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz], 4082 a, 0, select_ah_fpst(s, a->esz)) 4083 4084 static gen_helper_gvec_3_ptr * const fsqrt_fns[] = { 4085 NULL, gen_helper_sve_fsqrt_h, 4086 gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d, 4087 }; 4088 TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz], 4089 a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) 4090 4091 TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4092 gen_helper_sve_scvt_hh, a, 0, FPST_A64_F16) 4093 TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 4094 gen_helper_sve_scvt_sh, a, 0, FPST_A64_F16) 4095 TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 4096 gen_helper_sve_scvt_dh, a, 0, FPST_A64_F16) 4097 4098 TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4099 gen_helper_sve_scvt_ss, a, 0, FPST_A64) 4100 TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4101 gen_helper_sve_scvt_ds, a, 0, FPST_A64) 4102 4103 TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4104 gen_helper_sve_scvt_sd, a, 0, FPST_A64) 4105 TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4106 gen_helper_sve_scvt_dd, a, 0, FPST_A64) 4107 4108 TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz, 4109 gen_helper_sve_ucvt_hh, a, 0, FPST_A64_F16) 4110 TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz, 4111 gen_helper_sve_ucvt_sh, a, 0, FPST_A64_F16) 4112 TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz, 4113 gen_helper_sve_ucvt_dh, a, 0, FPST_A64_F16) 4114 4115 TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz, 4116 gen_helper_sve_ucvt_ss, a, 0, FPST_A64) 4117 TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz, 4118 gen_helper_sve_ucvt_ds, a, 0, FPST_A64) 4119 TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz, 4120 gen_helper_sve_ucvt_sd, a, 0, FPST_A64) 4121 4122 TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, 4123 gen_helper_sve_ucvt_dd, a, 0, FPST_A64) 4124 4125 /* 4126 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group 4127 */ 4128 4129 /* Subroutine loading a vector register at VOFS of LEN bytes. 4130 * The load should begin at the address Rn + IMM. 
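 * For example (illustrative): a 32-byte transfer gives nparts == 2 and is
 * unrolled into two 16-byte loads below, while a 96-byte transfer
 * (nparts == 6) falls back to the counted loop; any remaining tail that is
 * not a multiple of 16 (only possible for predicate loads) is handled
 * separately at the end.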
4131 */ 4132 4133 void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, 4134 int len, int rn, int imm) 4135 { 4136 int len_align = QEMU_ALIGN_DOWN(len, 16); 4137 int len_remain = len % 16; 4138 int nparts = len / 16 + ctpop8(len_remain); 4139 int midx = get_mem_index(s); 4140 TCGv_i64 dirty_addr, clean_addr, t0, t1; 4141 TCGv_i128 t16; 4142 4143 dirty_addr = tcg_temp_new_i64(); 4144 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); 4145 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); 4146 4147 /* 4148 * Note that unpredicated load/store of vector/predicate registers 4149 * are defined as a stream of bytes, which equates to little-endian 4150 * operations on larger quantities. 4151 * Attempt to keep code expansion to a minimum by limiting the 4152 * amount of unrolling done. 4153 */ 4154 if (nparts <= 4) { 4155 int i; 4156 4157 t0 = tcg_temp_new_i64(); 4158 t1 = tcg_temp_new_i64(); 4159 t16 = tcg_temp_new_i128(); 4160 4161 for (i = 0; i < len_align; i += 16) { 4162 tcg_gen_qemu_ld_i128(t16, clean_addr, midx, 4163 MO_LE | MO_128 | MO_ATOM_NONE); 4164 tcg_gen_extr_i128_i64(t0, t1, t16); 4165 tcg_gen_st_i64(t0, base, vofs + i); 4166 tcg_gen_st_i64(t1, base, vofs + i + 8); 4167 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4168 } 4169 } else { 4170 TCGLabel *loop = gen_new_label(); 4171 TCGv_ptr tp, i = tcg_temp_new_ptr(); 4172 4173 tcg_gen_movi_ptr(i, 0); 4174 gen_set_label(loop); 4175 4176 t16 = tcg_temp_new_i128(); 4177 tcg_gen_qemu_ld_i128(t16, clean_addr, midx, 4178 MO_LE | MO_128 | MO_ATOM_NONE); 4179 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4180 4181 tp = tcg_temp_new_ptr(); 4182 tcg_gen_add_ptr(tp, base, i); 4183 tcg_gen_addi_ptr(i, i, 16); 4184 4185 t0 = tcg_temp_new_i64(); 4186 t1 = tcg_temp_new_i64(); 4187 tcg_gen_extr_i128_i64(t0, t1, t16); 4188 4189 tcg_gen_st_i64(t0, tp, vofs); 4190 tcg_gen_st_i64(t1, tp, vofs + 8); 4191 4192 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); 4193 } 4194 4195 /* 4196 * Predicate register loads can be any multiple of 2. 4197 * Note that we still store the entire 64-bit unit into tcg_env. 4198 */ 4199 if (len_remain >= 8) { 4200 t0 = tcg_temp_new_i64(); 4201 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); 4202 tcg_gen_st_i64(t0, base, vofs + len_align); 4203 len_remain -= 8; 4204 len_align += 8; 4205 if (len_remain) { 4206 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4207 } 4208 } 4209 if (len_remain) { 4210 t0 = tcg_temp_new_i64(); 4211 switch (len_remain) { 4212 case 2: 4213 case 4: 4214 case 8: 4215 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, 4216 MO_LE | ctz32(len_remain) | MO_ATOM_NONE); 4217 break; 4218 4219 case 6: 4220 t1 = tcg_temp_new_i64(); 4221 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); 4222 tcg_gen_addi_i64(clean_addr, clean_addr, 4); 4223 tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); 4224 tcg_gen_deposit_i64(t0, t0, t1, 32, 32); 4225 break; 4226 4227 default: 4228 g_assert_not_reached(); 4229 } 4230 tcg_gen_st_i64(t0, base, vofs + len_align); 4231 } 4232 } 4233 4234 /* Similarly for stores. 
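 * The structure mirrors gen_sve_ldr() above: the 16-byte-aligned portion is
 * either unrolled (nparts <= 4) or looped over, and any remaining tail from
 * a predicate store is written out afterwards.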
*/ 4235 void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, 4236 int len, int rn, int imm) 4237 { 4238 int len_align = QEMU_ALIGN_DOWN(len, 16); 4239 int len_remain = len % 16; 4240 int nparts = len / 16 + ctpop8(len_remain); 4241 int midx = get_mem_index(s); 4242 TCGv_i64 dirty_addr, clean_addr, t0, t1; 4243 TCGv_i128 t16; 4244 4245 dirty_addr = tcg_temp_new_i64(); 4246 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm); 4247 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len, MO_8); 4248 4249 /* Note that unpredicated load/store of vector/predicate registers 4250 * are defined as a stream of bytes, which equates to little-endian 4251 * operations on larger quantities. There is no nice way to force 4252 * a little-endian store for aarch64_be-linux-user out of line. 4253 * 4254 * Attempt to keep code expansion to a minimum by limiting the 4255 * amount of unrolling done. 4256 */ 4257 if (nparts <= 4) { 4258 int i; 4259 4260 t0 = tcg_temp_new_i64(); 4261 t1 = tcg_temp_new_i64(); 4262 t16 = tcg_temp_new_i128(); 4263 for (i = 0; i < len_align; i += 16) { 4264 tcg_gen_ld_i64(t0, base, vofs + i); 4265 tcg_gen_ld_i64(t1, base, vofs + i + 8); 4266 tcg_gen_concat_i64_i128(t16, t0, t1); 4267 tcg_gen_qemu_st_i128(t16, clean_addr, midx, 4268 MO_LE | MO_128 | MO_ATOM_NONE); 4269 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4270 } 4271 } else { 4272 TCGLabel *loop = gen_new_label(); 4273 TCGv_ptr tp, i = tcg_temp_new_ptr(); 4274 4275 tcg_gen_movi_ptr(i, 0); 4276 gen_set_label(loop); 4277 4278 t0 = tcg_temp_new_i64(); 4279 t1 = tcg_temp_new_i64(); 4280 tp = tcg_temp_new_ptr(); 4281 tcg_gen_add_ptr(tp, base, i); 4282 tcg_gen_ld_i64(t0, tp, vofs); 4283 tcg_gen_ld_i64(t1, tp, vofs + 8); 4284 tcg_gen_addi_ptr(i, i, 16); 4285 4286 t16 = tcg_temp_new_i128(); 4287 tcg_gen_concat_i64_i128(t16, t0, t1); 4288 4289 tcg_gen_qemu_st_i128(t16, clean_addr, midx, 4290 MO_LE | MO_128 | MO_ATOM_NONE); 4291 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4292 4293 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); 4294 } 4295 4296 /* Predicate register stores can be any multiple of 2. 
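 * For example, a 6-byte tail is emitted as a 4-byte store followed by a
 * 2-byte store of bits [47:32]; 2-, 4- and 8-byte tails map directly onto
 * a single store of that width.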
*/ 4297 if (len_remain >= 8) { 4298 t0 = tcg_temp_new_i64(); 4299 tcg_gen_ld_i64(t0, base, vofs + len_align); 4300 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE); 4301 len_remain -= 8; 4302 len_align += 8; 4303 if (len_remain) { 4304 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4305 } 4306 } 4307 if (len_remain) { 4308 t0 = tcg_temp_new_i64(); 4309 tcg_gen_ld_i64(t0, base, vofs + len_align); 4310 4311 switch (len_remain) { 4312 case 2: 4313 case 4: 4314 case 8: 4315 tcg_gen_qemu_st_i64(t0, clean_addr, midx, 4316 MO_LE | ctz32(len_remain) | MO_ATOM_NONE); 4317 break; 4318 4319 case 6: 4320 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE); 4321 tcg_gen_addi_i64(clean_addr, clean_addr, 4); 4322 tcg_gen_shri_i64(t0, t0, 32); 4323 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW | MO_ATOM_NONE); 4324 break; 4325 4326 default: 4327 g_assert_not_reached(); 4328 } 4329 } 4330 } 4331 4332 static bool trans_LDR_zri(DisasContext *s, arg_rri *a) 4333 { 4334 if (!dc_isar_feature(aa64_sve, s)) { 4335 return false; 4336 } 4337 if (sve_access_check(s)) { 4338 int size = vec_full_reg_size(s); 4339 int off = vec_full_reg_offset(s, a->rd); 4340 gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size); 4341 } 4342 return true; 4343 } 4344 4345 static bool trans_LDR_pri(DisasContext *s, arg_rri *a) 4346 { 4347 if (!dc_isar_feature(aa64_sve, s)) { 4348 return false; 4349 } 4350 if (sve_access_check(s)) { 4351 int size = pred_full_reg_size(s); 4352 int off = pred_full_reg_offset(s, a->rd); 4353 gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size); 4354 } 4355 return true; 4356 } 4357 4358 static bool trans_STR_zri(DisasContext *s, arg_rri *a) 4359 { 4360 if (!dc_isar_feature(aa64_sve, s)) { 4361 return false; 4362 } 4363 if (sve_access_check(s)) { 4364 int size = vec_full_reg_size(s); 4365 int off = vec_full_reg_offset(s, a->rd); 4366 gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size); 4367 } 4368 return true; 4369 } 4370 4371 static bool trans_STR_pri(DisasContext *s, arg_rri *a) 4372 { 4373 if (!dc_isar_feature(aa64_sve, s)) { 4374 return false; 4375 } 4376 if (sve_access_check(s)) { 4377 int size = pred_full_reg_size(s); 4378 int off = pred_full_reg_offset(s, a->rd); 4379 gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size); 4380 } 4381 return true; 4382 } 4383 4384 /* 4385 *** SVE Memory - Contiguous Load Group 4386 */ 4387 4388 /* The memory mode of the dtype. */ 4389 static const MemOp dtype_mop[16] = { 4390 MO_UB, MO_UB, MO_UB, MO_UB, 4391 MO_SL, MO_UW, MO_UW, MO_UW, 4392 MO_SW, MO_SW, MO_UL, MO_UL, 4393 MO_SB, MO_SB, MO_SB, MO_UQ 4394 }; 4395 4396 #define dtype_msz(x) (dtype_mop[x] & MO_SIZE) 4397 4398 /* The vector element size of dtype. */ 4399 static const uint8_t dtype_esz[16] = { 4400 0, 1, 2, 3, 4401 3, 1, 2, 3, 4402 3, 2, 2, 3, 4403 3, 2, 1, 3 4404 }; 4405 4406 uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, 4407 uint32_t msz, bool is_write, uint32_t data) 4408 { 4409 uint32_t sizem1; 4410 uint32_t desc = 0; 4411 4412 /* Assert all of the data fits, with or without MTE enabled. 
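 * As an illustration: for an LD4D-style access (nregs == 4, msz == MO_64)
 * the transfer unit described to the MTE check is 4 << 3 == 32 bytes, so
 * SIZEM1 is 31.  When MTE is active those fields sit above
 * SVE_MTEDESC_SHIFT while the caller's data (e.g. the zt register number)
 * stays in the low bits; the whole value is then wrapped by simd_desc().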
*/ 4413 assert(nregs >= 1 && nregs <= 4); 4414 sizem1 = (nregs << msz) - 1; 4415 assert(sizem1 <= R_MTEDESC_SIZEM1_MASK >> R_MTEDESC_SIZEM1_SHIFT); 4416 assert(data < 1u << SVE_MTEDESC_SHIFT); 4417 4418 if (s->mte_active[0]) { 4419 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); 4420 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); 4421 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); 4422 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); 4423 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, sizem1); 4424 desc <<= SVE_MTEDESC_SHIFT; 4425 } 4426 return simd_desc(vsz, vsz, desc | data); 4427 } 4428 4429 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, 4430 int dtype, uint32_t nregs, bool is_write, 4431 gen_helper_gvec_mem *fn) 4432 { 4433 TCGv_ptr t_pg; 4434 uint32_t desc; 4435 4436 if (!s->mte_active[0]) { 4437 addr = clean_data_tbi(s, addr); 4438 } 4439 4440 /* 4441 * For e.g. LD4, there are not enough arguments to pass all 4 4442 * registers as pointers, so encode the regno into the data field. 4443 * For consistency, do this even for LD1. 4444 */ 4445 desc = make_svemte_desc(s, vec_full_reg_size(s), nregs, 4446 dtype_msz(dtype), is_write, zt); 4447 t_pg = tcg_temp_new_ptr(); 4448 4449 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 4450 fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); 4451 } 4452 4453 /* Indexed by [mte][be][dtype][nreg] */ 4454 static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = { 4455 { /* mte inactive, little-endian */ 4456 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, 4457 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, 4458 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, 4459 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, 4460 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, 4461 4462 { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL }, 4463 { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r, 4464 gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r }, 4465 { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL }, 4466 { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL }, 4467 4468 { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL }, 4469 { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL }, 4470 { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r, 4471 gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r }, 4472 { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL }, 4473 4474 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, 4475 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, 4476 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, 4477 { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r, 4478 gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } }, 4479 4480 /* mte inactive, big-endian */ 4481 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r, 4482 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r }, 4483 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL }, 4484 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL }, 4485 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL }, 4486 4487 { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL }, 4488 { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r, 4489 gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r }, 4490 { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL }, 4491 { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL }, 4492 4493 { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL }, 4494 { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL }, 4495 { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r, 4496 gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r }, 4497 { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL }, 4498 4499 { 
gen_helper_sve_ld1bds_r, NULL, NULL, NULL }, 4500 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL }, 4501 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL }, 4502 { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r, 4503 gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } }, 4504 4505 { /* mte active, little-endian */ 4506 { { gen_helper_sve_ld1bb_r_mte, 4507 gen_helper_sve_ld2bb_r_mte, 4508 gen_helper_sve_ld3bb_r_mte, 4509 gen_helper_sve_ld4bb_r_mte }, 4510 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL }, 4511 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL }, 4512 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL }, 4513 4514 { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL }, 4515 { gen_helper_sve_ld1hh_le_r_mte, 4516 gen_helper_sve_ld2hh_le_r_mte, 4517 gen_helper_sve_ld3hh_le_r_mte, 4518 gen_helper_sve_ld4hh_le_r_mte }, 4519 { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL }, 4520 { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL }, 4521 4522 { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL }, 4523 { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL }, 4524 { gen_helper_sve_ld1ss_le_r_mte, 4525 gen_helper_sve_ld2ss_le_r_mte, 4526 gen_helper_sve_ld3ss_le_r_mte, 4527 gen_helper_sve_ld4ss_le_r_mte }, 4528 { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL }, 4529 4530 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL }, 4531 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL }, 4532 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL }, 4533 { gen_helper_sve_ld1dd_le_r_mte, 4534 gen_helper_sve_ld2dd_le_r_mte, 4535 gen_helper_sve_ld3dd_le_r_mte, 4536 gen_helper_sve_ld4dd_le_r_mte } }, 4537 4538 /* mte active, big-endian */ 4539 { { gen_helper_sve_ld1bb_r_mte, 4540 gen_helper_sve_ld2bb_r_mte, 4541 gen_helper_sve_ld3bb_r_mte, 4542 gen_helper_sve_ld4bb_r_mte }, 4543 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL }, 4544 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL }, 4545 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL }, 4546 4547 { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL }, 4548 { gen_helper_sve_ld1hh_be_r_mte, 4549 gen_helper_sve_ld2hh_be_r_mte, 4550 gen_helper_sve_ld3hh_be_r_mte, 4551 gen_helper_sve_ld4hh_be_r_mte }, 4552 { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL }, 4553 { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL }, 4554 4555 { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL }, 4556 { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL }, 4557 { gen_helper_sve_ld1ss_be_r_mte, 4558 gen_helper_sve_ld2ss_be_r_mte, 4559 gen_helper_sve_ld3ss_be_r_mte, 4560 gen_helper_sve_ld4ss_be_r_mte }, 4561 { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL }, 4562 4563 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL }, 4564 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL }, 4565 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL }, 4566 { gen_helper_sve_ld1dd_be_r_mte, 4567 gen_helper_sve_ld2dd_be_r_mte, 4568 gen_helper_sve_ld3dd_be_r_mte, 4569 gen_helper_sve_ld4dd_be_r_mte } } }, 4570 }; 4571 4572 static void do_ld_zpa(DisasContext *s, int zt, int pg, 4573 TCGv_i64 addr, int dtype, int nreg) 4574 { 4575 gen_helper_gvec_mem *fn 4576 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg]; 4577 4578 /* 4579 * While there are holes in the table, they are not 4580 * accessible via the instruction encoding. 
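 * For example, dtype 3 (LD1B zero-extended to 64-bit elements) only exists
 * as a single-register load, so its nreg > 1 slots are NULL, but the
 * decode can never select those combinations.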
4581 */ 4582 assert(fn != NULL); 4583 do_mem_zpa(s, zt, pg, addr, dtype, nreg + 1, false, fn); 4584 } 4585 4586 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a) 4587 { 4588 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 4589 return false; 4590 } 4591 if (sve_access_check(s)) { 4592 TCGv_i64 addr = tcg_temp_new_i64(); 4593 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); 4594 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4595 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); 4596 } 4597 return true; 4598 } 4599 4600 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a) 4601 { 4602 if (!dc_isar_feature(aa64_sve, s)) { 4603 return false; 4604 } 4605 if (sve_access_check(s)) { 4606 int vsz = vec_full_reg_size(s); 4607 int elements = vsz >> dtype_esz[a->dtype]; 4608 TCGv_i64 addr = tcg_temp_new_i64(); 4609 4610 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), 4611 (a->imm * elements * (a->nreg + 1)) 4612 << dtype_msz(a->dtype)); 4613 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg); 4614 } 4615 return true; 4616 } 4617 4618 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) 4619 { 4620 static gen_helper_gvec_mem * const fns[2][2][16] = { 4621 { /* mte inactive, little-endian */ 4622 { gen_helper_sve_ldff1bb_r, 4623 gen_helper_sve_ldff1bhu_r, 4624 gen_helper_sve_ldff1bsu_r, 4625 gen_helper_sve_ldff1bdu_r, 4626 4627 gen_helper_sve_ldff1sds_le_r, 4628 gen_helper_sve_ldff1hh_le_r, 4629 gen_helper_sve_ldff1hsu_le_r, 4630 gen_helper_sve_ldff1hdu_le_r, 4631 4632 gen_helper_sve_ldff1hds_le_r, 4633 gen_helper_sve_ldff1hss_le_r, 4634 gen_helper_sve_ldff1ss_le_r, 4635 gen_helper_sve_ldff1sdu_le_r, 4636 4637 gen_helper_sve_ldff1bds_r, 4638 gen_helper_sve_ldff1bss_r, 4639 gen_helper_sve_ldff1bhs_r, 4640 gen_helper_sve_ldff1dd_le_r }, 4641 4642 /* mte inactive, big-endian */ 4643 { gen_helper_sve_ldff1bb_r, 4644 gen_helper_sve_ldff1bhu_r, 4645 gen_helper_sve_ldff1bsu_r, 4646 gen_helper_sve_ldff1bdu_r, 4647 4648 gen_helper_sve_ldff1sds_be_r, 4649 gen_helper_sve_ldff1hh_be_r, 4650 gen_helper_sve_ldff1hsu_be_r, 4651 gen_helper_sve_ldff1hdu_be_r, 4652 4653 gen_helper_sve_ldff1hds_be_r, 4654 gen_helper_sve_ldff1hss_be_r, 4655 gen_helper_sve_ldff1ss_be_r, 4656 gen_helper_sve_ldff1sdu_be_r, 4657 4658 gen_helper_sve_ldff1bds_r, 4659 gen_helper_sve_ldff1bss_r, 4660 gen_helper_sve_ldff1bhs_r, 4661 gen_helper_sve_ldff1dd_be_r } }, 4662 4663 { /* mte active, little-endian */ 4664 { gen_helper_sve_ldff1bb_r_mte, 4665 gen_helper_sve_ldff1bhu_r_mte, 4666 gen_helper_sve_ldff1bsu_r_mte, 4667 gen_helper_sve_ldff1bdu_r_mte, 4668 4669 gen_helper_sve_ldff1sds_le_r_mte, 4670 gen_helper_sve_ldff1hh_le_r_mte, 4671 gen_helper_sve_ldff1hsu_le_r_mte, 4672 gen_helper_sve_ldff1hdu_le_r_mte, 4673 4674 gen_helper_sve_ldff1hds_le_r_mte, 4675 gen_helper_sve_ldff1hss_le_r_mte, 4676 gen_helper_sve_ldff1ss_le_r_mte, 4677 gen_helper_sve_ldff1sdu_le_r_mte, 4678 4679 gen_helper_sve_ldff1bds_r_mte, 4680 gen_helper_sve_ldff1bss_r_mte, 4681 gen_helper_sve_ldff1bhs_r_mte, 4682 gen_helper_sve_ldff1dd_le_r_mte }, 4683 4684 /* mte active, big-endian */ 4685 { gen_helper_sve_ldff1bb_r_mte, 4686 gen_helper_sve_ldff1bhu_r_mte, 4687 gen_helper_sve_ldff1bsu_r_mte, 4688 gen_helper_sve_ldff1bdu_r_mte, 4689 4690 gen_helper_sve_ldff1sds_be_r_mte, 4691 gen_helper_sve_ldff1hh_be_r_mte, 4692 gen_helper_sve_ldff1hsu_be_r_mte, 4693 gen_helper_sve_ldff1hdu_be_r_mte, 4694 4695 gen_helper_sve_ldff1hds_be_r_mte, 4696 gen_helper_sve_ldff1hss_be_r_mte, 4697 gen_helper_sve_ldff1ss_be_r_mte, 4698 
gen_helper_sve_ldff1sdu_be_r_mte, 4699
4700 gen_helper_sve_ldff1bds_r_mte,
4701 gen_helper_sve_ldff1bss_r_mte,
4702 gen_helper_sve_ldff1bhs_r_mte,
4703 gen_helper_sve_ldff1dd_be_r_mte } },
4704 };
4705
4706 if (!dc_isar_feature(aa64_sve, s)) {
4707 return false;
4708 }
4709 s->is_nonstreaming = true;
4710 if (sve_access_check(s)) {
4711 TCGv_i64 addr = tcg_temp_new_i64();
4712 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4713 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4714 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
4715 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
4716 }
4717 return true;
4718 }
4719
4720 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
4721 {
4722 static gen_helper_gvec_mem * const fns[2][2][16] = {
4723 { /* mte inactive, little-endian */
4724 { gen_helper_sve_ldnf1bb_r,
4725 gen_helper_sve_ldnf1bhu_r,
4726 gen_helper_sve_ldnf1bsu_r,
4727 gen_helper_sve_ldnf1bdu_r,
4728
4729 gen_helper_sve_ldnf1sds_le_r,
4730 gen_helper_sve_ldnf1hh_le_r,
4731 gen_helper_sve_ldnf1hsu_le_r,
4732 gen_helper_sve_ldnf1hdu_le_r,
4733
4734 gen_helper_sve_ldnf1hds_le_r,
4735 gen_helper_sve_ldnf1hss_le_r,
4736 gen_helper_sve_ldnf1ss_le_r,
4737 gen_helper_sve_ldnf1sdu_le_r,
4738
4739 gen_helper_sve_ldnf1bds_r,
4740 gen_helper_sve_ldnf1bss_r,
4741 gen_helper_sve_ldnf1bhs_r,
4742 gen_helper_sve_ldnf1dd_le_r },
4743
4744 /* mte inactive, big-endian */
4745 { gen_helper_sve_ldnf1bb_r,
4746 gen_helper_sve_ldnf1bhu_r,
4747 gen_helper_sve_ldnf1bsu_r,
4748 gen_helper_sve_ldnf1bdu_r,
4749
4750 gen_helper_sve_ldnf1sds_be_r,
4751 gen_helper_sve_ldnf1hh_be_r,
4752 gen_helper_sve_ldnf1hsu_be_r,
4753 gen_helper_sve_ldnf1hdu_be_r,
4754
4755 gen_helper_sve_ldnf1hds_be_r,
4756 gen_helper_sve_ldnf1hss_be_r,
4757 gen_helper_sve_ldnf1ss_be_r,
4758 gen_helper_sve_ldnf1sdu_be_r,
4759
4760 gen_helper_sve_ldnf1bds_r,
4761 gen_helper_sve_ldnf1bss_r,
4762 gen_helper_sve_ldnf1bhs_r,
4763 gen_helper_sve_ldnf1dd_be_r } },
4764
4765 { /* mte active, little-endian */
4766 { gen_helper_sve_ldnf1bb_r_mte,
4767 gen_helper_sve_ldnf1bhu_r_mte,
4768 gen_helper_sve_ldnf1bsu_r_mte,
4769 gen_helper_sve_ldnf1bdu_r_mte,
4770
4771 gen_helper_sve_ldnf1sds_le_r_mte,
4772 gen_helper_sve_ldnf1hh_le_r_mte,
4773 gen_helper_sve_ldnf1hsu_le_r_mte,
4774 gen_helper_sve_ldnf1hdu_le_r_mte,
4775
4776 gen_helper_sve_ldnf1hds_le_r_mte,
4777 gen_helper_sve_ldnf1hss_le_r_mte,
4778 gen_helper_sve_ldnf1ss_le_r_mte,
4779 gen_helper_sve_ldnf1sdu_le_r_mte,
4780
4781 gen_helper_sve_ldnf1bds_r_mte,
4782 gen_helper_sve_ldnf1bss_r_mte,
4783 gen_helper_sve_ldnf1bhs_r_mte,
4784 gen_helper_sve_ldnf1dd_le_r_mte },
4785
4786 /* mte active, big-endian */
4787 { gen_helper_sve_ldnf1bb_r_mte,
4788 gen_helper_sve_ldnf1bhu_r_mte,
4789 gen_helper_sve_ldnf1bsu_r_mte,
4790 gen_helper_sve_ldnf1bdu_r_mte,
4791
4792 gen_helper_sve_ldnf1sds_be_r_mte,
4793 gen_helper_sve_ldnf1hh_be_r_mte,
4794 gen_helper_sve_ldnf1hsu_be_r_mte,
4795 gen_helper_sve_ldnf1hdu_be_r_mte,
4796
4797 gen_helper_sve_ldnf1hds_be_r_mte,
4798 gen_helper_sve_ldnf1hss_be_r_mte,
4799 gen_helper_sve_ldnf1ss_be_r_mte,
4800 gen_helper_sve_ldnf1sdu_be_r_mte,
4801
4802 gen_helper_sve_ldnf1bds_r_mte,
4803 gen_helper_sve_ldnf1bss_r_mte,
4804 gen_helper_sve_ldnf1bhs_r_mte,
4805 gen_helper_sve_ldnf1dd_be_r_mte } },
4806 };
4807
4808 if (!dc_isar_feature(aa64_sve, s)) {
4809 return false;
4810 }
4811 s->is_nonstreaming = true;
4812 if (sve_access_check(s)) {
4813 int vsz = vec_full_reg_size(s);
4814 int elements = vsz >> dtype_esz[a->dtype];
4815 int off =
(a->imm * elements) << dtype_msz(a->dtype); 4816 TCGv_i64 addr = tcg_temp_new_i64(); 4817 4818 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off); 4819 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false, 4820 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]); 4821 } 4822 return true; 4823 } 4824 4825 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) 4826 { 4827 unsigned vsz = vec_full_reg_size(s); 4828 TCGv_ptr t_pg; 4829 int poff; 4830 uint32_t desc; 4831 4832 /* Load the first quadword using the normal predicated load helpers. */ 4833 if (!s->mte_active[0]) { 4834 addr = clean_data_tbi(s, addr); 4835 } 4836 4837 poff = pred_full_reg_offset(s, pg); 4838 if (vsz > 16) { 4839 /* 4840 * Zero-extend the first 16 bits of the predicate into a temporary. 4841 * This avoids triggering an assert making sure we don't have bits 4842 * set within a predicate beyond VQ, but we have lowered VQ to 1 4843 * for this load operation. 4844 */ 4845 TCGv_i64 tmp = tcg_temp_new_i64(); 4846 #if HOST_BIG_ENDIAN 4847 poff += 6; 4848 #endif 4849 tcg_gen_ld16u_i64(tmp, tcg_env, poff); 4850 4851 poff = offsetof(CPUARMState, vfp.preg_tmp); 4852 tcg_gen_st_i64(tmp, tcg_env, poff); 4853 } 4854 4855 t_pg = tcg_temp_new_ptr(); 4856 tcg_gen_addi_ptr(t_pg, tcg_env, poff); 4857 4858 gen_helper_gvec_mem *fn 4859 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; 4860 desc = make_svemte_desc(s, 16, 1, dtype_msz(dtype), false, zt); 4861 fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); 4862 4863 /* Replicate that first quadword. */ 4864 if (vsz > 16) { 4865 int doff = vec_full_reg_offset(s, zt); 4866 tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16); 4867 } 4868 } 4869 4870 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a) 4871 { 4872 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 4873 return false; 4874 } 4875 if (sve_access_check(s)) { 4876 int msz = dtype_msz(a->dtype); 4877 TCGv_i64 addr = tcg_temp_new_i64(); 4878 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz); 4879 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4880 do_ldrq(s, a->rd, a->pg, addr, a->dtype); 4881 } 4882 return true; 4883 } 4884 4885 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a) 4886 { 4887 if (!dc_isar_feature(aa64_sve, s)) { 4888 return false; 4889 } 4890 if (sve_access_check(s)) { 4891 TCGv_i64 addr = tcg_temp_new_i64(); 4892 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16); 4893 do_ldrq(s, a->rd, a->pg, addr, a->dtype); 4894 } 4895 return true; 4896 } 4897 4898 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) 4899 { 4900 unsigned vsz = vec_full_reg_size(s); 4901 unsigned vsz_r32; 4902 TCGv_ptr t_pg; 4903 int poff, doff; 4904 uint32_t desc; 4905 4906 if (vsz < 32) { 4907 /* 4908 * Note that this UNDEFINED check comes after CheckSVEEnabled() 4909 * in the ARM pseudocode, which is the sve_access_check() done 4910 * in our caller. We should not now return false from the caller. 4911 */ 4912 unallocated_encoding(s); 4913 return; 4914 } 4915 4916 /* Load the first octaword using the normal predicated load helpers. */ 4917 if (!s->mte_active[0]) { 4918 addr = clean_data_tbi(s, addr); 4919 } 4920 4921 poff = pred_full_reg_offset(s, pg); 4922 if (vsz > 32) { 4923 /* 4924 * Zero-extend the first 32 bits of the predicate into a temporary. 4925 * This avoids triggering an assert making sure we don't have bits 4926 * set within a predicate beyond VQ, but we have lowered VQ to 2 4927 * for this load operation. 
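 * For example, with a 64-byte vector the governing predicate register is
 * 8 bytes wide; only its first 32 bits are needed for the 32-byte load,
 * so they are copied into vfp.preg_tmp and that temporary is used as the
 * predicate for the helper call.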
4928 */ 4929 TCGv_i64 tmp = tcg_temp_new_i64(); 4930 #if HOST_BIG_ENDIAN 4931 poff += 4; 4932 #endif 4933 tcg_gen_ld32u_i64(tmp, tcg_env, poff); 4934 4935 poff = offsetof(CPUARMState, vfp.preg_tmp); 4936 tcg_gen_st_i64(tmp, tcg_env, poff); 4937 } 4938 4939 t_pg = tcg_temp_new_ptr(); 4940 tcg_gen_addi_ptr(t_pg, tcg_env, poff); 4941 4942 gen_helper_gvec_mem *fn 4943 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; 4944 desc = make_svemte_desc(s, 32, 1, dtype_msz(dtype), false, zt); 4945 fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); 4946 4947 /* 4948 * Replicate that first octaword. 4949 * The replication happens in units of 32; if the full vector size 4950 * is not a multiple of 32, the final bits are zeroed. 4951 */ 4952 doff = vec_full_reg_offset(s, zt); 4953 vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32); 4954 if (vsz >= 64) { 4955 tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32); 4956 } 4957 vsz -= vsz_r32; 4958 if (vsz) { 4959 tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0); 4960 } 4961 } 4962 4963 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) 4964 { 4965 if (!dc_isar_feature(aa64_sve_f64mm, s)) { 4966 return false; 4967 } 4968 if (a->rm == 31) { 4969 return false; 4970 } 4971 s->is_nonstreaming = true; 4972 if (sve_access_check(s)) { 4973 TCGv_i64 addr = tcg_temp_new_i64(); 4974 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); 4975 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 4976 do_ldro(s, a->rd, a->pg, addr, a->dtype); 4977 } 4978 return true; 4979 } 4980 4981 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) 4982 { 4983 if (!dc_isar_feature(aa64_sve_f64mm, s)) { 4984 return false; 4985 } 4986 s->is_nonstreaming = true; 4987 if (sve_access_check(s)) { 4988 TCGv_i64 addr = tcg_temp_new_i64(); 4989 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); 4990 do_ldro(s, a->rd, a->pg, addr, a->dtype); 4991 } 4992 return true; 4993 } 4994 4995 /* Load and broadcast element. */ 4996 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a) 4997 { 4998 unsigned vsz = vec_full_reg_size(s); 4999 unsigned psz = pred_full_reg_size(s); 5000 unsigned esz = dtype_esz[a->dtype]; 5001 unsigned msz = dtype_msz(a->dtype); 5002 TCGLabel *over; 5003 TCGv_i64 temp, clean_addr; 5004 MemOp memop; 5005 5006 if (!dc_isar_feature(aa64_sve, s)) { 5007 return false; 5008 } 5009 if (!sve_access_check(s)) { 5010 return true; 5011 } 5012 5013 over = gen_new_label(); 5014 5015 /* If the guarding predicate has no bits set, no load occurs. */ 5016 if (psz <= 8) { 5017 /* Reduce the pred_esz_masks value simply to reduce the 5018 * size of the code generated here. 5019 */ 5020 uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8); 5021 temp = tcg_temp_new_i64(); 5022 tcg_gen_ld_i64(temp, tcg_env, pred_full_reg_offset(s, a->pg)); 5023 tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask); 5024 tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over); 5025 } else { 5026 TCGv_i32 t32 = tcg_temp_new_i32(); 5027 find_last_active(s, t32, esz, a->pg); 5028 tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over); 5029 } 5030 5031 /* Load the data. */ 5032 temp = tcg_temp_new_i64(); 5033 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz); 5034 5035 memop = finalize_memop(s, dtype_mop[a->dtype]); 5036 clean_addr = gen_mte_check1(s, temp, false, true, memop); 5037 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s), memop); 5038 5039 /* Broadcast to *all* elements. 
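 * The loaded element is duplicated across the entire vector; the
 * do_movz_zpz() call below then clears the lanes whose governing
 * predicate bit is not set.  If no predicate bit was set, we branched
 * over the load entirely and only that zeroing takes effect.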
*/ 5040 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), 5041 vsz, vsz, temp); 5042 5043 /* Zero the inactive elements. */ 5044 gen_set_label(over); 5045 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false); 5046 } 5047 5048 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, 5049 int msz, int esz, int nreg) 5050 { 5051 static gen_helper_gvec_mem * const fn_single[2][2][4][4] = { 5052 { { { gen_helper_sve_st1bb_r, 5053 gen_helper_sve_st1bh_r, 5054 gen_helper_sve_st1bs_r, 5055 gen_helper_sve_st1bd_r }, 5056 { NULL, 5057 gen_helper_sve_st1hh_le_r, 5058 gen_helper_sve_st1hs_le_r, 5059 gen_helper_sve_st1hd_le_r }, 5060 { NULL, NULL, 5061 gen_helper_sve_st1ss_le_r, 5062 gen_helper_sve_st1sd_le_r }, 5063 { NULL, NULL, NULL, 5064 gen_helper_sve_st1dd_le_r } }, 5065 { { gen_helper_sve_st1bb_r, 5066 gen_helper_sve_st1bh_r, 5067 gen_helper_sve_st1bs_r, 5068 gen_helper_sve_st1bd_r }, 5069 { NULL, 5070 gen_helper_sve_st1hh_be_r, 5071 gen_helper_sve_st1hs_be_r, 5072 gen_helper_sve_st1hd_be_r }, 5073 { NULL, NULL, 5074 gen_helper_sve_st1ss_be_r, 5075 gen_helper_sve_st1sd_be_r }, 5076 { NULL, NULL, NULL, 5077 gen_helper_sve_st1dd_be_r } } }, 5078 5079 { { { gen_helper_sve_st1bb_r_mte, 5080 gen_helper_sve_st1bh_r_mte, 5081 gen_helper_sve_st1bs_r_mte, 5082 gen_helper_sve_st1bd_r_mte }, 5083 { NULL, 5084 gen_helper_sve_st1hh_le_r_mte, 5085 gen_helper_sve_st1hs_le_r_mte, 5086 gen_helper_sve_st1hd_le_r_mte }, 5087 { NULL, NULL, 5088 gen_helper_sve_st1ss_le_r_mte, 5089 gen_helper_sve_st1sd_le_r_mte }, 5090 { NULL, NULL, NULL, 5091 gen_helper_sve_st1dd_le_r_mte } }, 5092 { { gen_helper_sve_st1bb_r_mte, 5093 gen_helper_sve_st1bh_r_mte, 5094 gen_helper_sve_st1bs_r_mte, 5095 gen_helper_sve_st1bd_r_mte }, 5096 { NULL, 5097 gen_helper_sve_st1hh_be_r_mte, 5098 gen_helper_sve_st1hs_be_r_mte, 5099 gen_helper_sve_st1hd_be_r_mte }, 5100 { NULL, NULL, 5101 gen_helper_sve_st1ss_be_r_mte, 5102 gen_helper_sve_st1sd_be_r_mte }, 5103 { NULL, NULL, NULL, 5104 gen_helper_sve_st1dd_be_r_mte } } }, 5105 }; 5106 static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = { 5107 { { { gen_helper_sve_st2bb_r, 5108 gen_helper_sve_st2hh_le_r, 5109 gen_helper_sve_st2ss_le_r, 5110 gen_helper_sve_st2dd_le_r }, 5111 { gen_helper_sve_st3bb_r, 5112 gen_helper_sve_st3hh_le_r, 5113 gen_helper_sve_st3ss_le_r, 5114 gen_helper_sve_st3dd_le_r }, 5115 { gen_helper_sve_st4bb_r, 5116 gen_helper_sve_st4hh_le_r, 5117 gen_helper_sve_st4ss_le_r, 5118 gen_helper_sve_st4dd_le_r } }, 5119 { { gen_helper_sve_st2bb_r, 5120 gen_helper_sve_st2hh_be_r, 5121 gen_helper_sve_st2ss_be_r, 5122 gen_helper_sve_st2dd_be_r }, 5123 { gen_helper_sve_st3bb_r, 5124 gen_helper_sve_st3hh_be_r, 5125 gen_helper_sve_st3ss_be_r, 5126 gen_helper_sve_st3dd_be_r }, 5127 { gen_helper_sve_st4bb_r, 5128 gen_helper_sve_st4hh_be_r, 5129 gen_helper_sve_st4ss_be_r, 5130 gen_helper_sve_st4dd_be_r } } }, 5131 { { { gen_helper_sve_st2bb_r_mte, 5132 gen_helper_sve_st2hh_le_r_mte, 5133 gen_helper_sve_st2ss_le_r_mte, 5134 gen_helper_sve_st2dd_le_r_mte }, 5135 { gen_helper_sve_st3bb_r_mte, 5136 gen_helper_sve_st3hh_le_r_mte, 5137 gen_helper_sve_st3ss_le_r_mte, 5138 gen_helper_sve_st3dd_le_r_mte }, 5139 { gen_helper_sve_st4bb_r_mte, 5140 gen_helper_sve_st4hh_le_r_mte, 5141 gen_helper_sve_st4ss_le_r_mte, 5142 gen_helper_sve_st4dd_le_r_mte } }, 5143 { { gen_helper_sve_st2bb_r_mte, 5144 gen_helper_sve_st2hh_be_r_mte, 5145 gen_helper_sve_st2ss_be_r_mte, 5146 gen_helper_sve_st2dd_be_r_mte }, 5147 { gen_helper_sve_st3bb_r_mte, 5148 gen_helper_sve_st3hh_be_r_mte, 5149 
gen_helper_sve_st3ss_be_r_mte, 5150 gen_helper_sve_st3dd_be_r_mte }, 5151 { gen_helper_sve_st4bb_r_mte, 5152 gen_helper_sve_st4hh_be_r_mte, 5153 gen_helper_sve_st4ss_be_r_mte, 5154 gen_helper_sve_st4dd_be_r_mte } } }, 5155 }; 5156 gen_helper_gvec_mem *fn; 5157 int be = s->be_data == MO_BE; 5158 5159 if (nreg == 0) { 5160 /* ST1 */ 5161 fn = fn_single[s->mte_active[0]][be][msz][esz]; 5162 } else { 5163 /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */ 5164 assert(msz == esz); 5165 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz]; 5166 } 5167 assert(fn != NULL); 5168 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg + 1, true, fn); 5169 } 5170 5171 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a) 5172 { 5173 if (!dc_isar_feature(aa64_sve, s)) { 5174 return false; 5175 } 5176 if (a->rm == 31 || a->msz > a->esz) { 5177 return false; 5178 } 5179 if (sve_access_check(s)) { 5180 TCGv_i64 addr = tcg_temp_new_i64(); 5181 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz); 5182 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); 5183 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); 5184 } 5185 return true; 5186 } 5187 5188 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a) 5189 { 5190 if (!dc_isar_feature(aa64_sve, s)) { 5191 return false; 5192 } 5193 if (a->msz > a->esz) { 5194 return false; 5195 } 5196 if (sve_access_check(s)) { 5197 int vsz = vec_full_reg_size(s); 5198 int elements = vsz >> a->esz; 5199 TCGv_i64 addr = tcg_temp_new_i64(); 5200 5201 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), 5202 (a->imm * elements * (a->nreg + 1)) << a->msz); 5203 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg); 5204 } 5205 return true; 5206 } 5207 5208 /* 5209 *** SVE gather loads / scatter stores 5210 */ 5211 5212 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, 5213 int scale, TCGv_i64 scalar, int msz, bool is_write, 5214 gen_helper_gvec_mem_scatter *fn) 5215 { 5216 TCGv_ptr t_zm = tcg_temp_new_ptr(); 5217 TCGv_ptr t_pg = tcg_temp_new_ptr(); 5218 TCGv_ptr t_zt = tcg_temp_new_ptr(); 5219 uint32_t desc; 5220 5221 tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); 5222 tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm)); 5223 tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt)); 5224 5225 desc = make_svemte_desc(s, vec_full_reg_size(s), 1, msz, is_write, scale); 5226 fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); 5227 } 5228 5229 /* Indexed by [mte][be][ff][xs][u][msz]. 
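 * For example, a little-endian, non-first-fault LD1W gather with uxtw
 * (unsigned 32-bit) offsets and MTE inactive selects
 * [0][0][0][0][1][2], i.e. gen_helper_sve_ldss_le_zsu.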
*/ 5230 static gen_helper_gvec_mem_scatter * const 5231 gather_load_fn32[2][2][2][2][2][3] = { 5232 { /* MTE Inactive */ 5233 { /* Little-endian */ 5234 { { { gen_helper_sve_ldbss_zsu, 5235 gen_helper_sve_ldhss_le_zsu, 5236 NULL, }, 5237 { gen_helper_sve_ldbsu_zsu, 5238 gen_helper_sve_ldhsu_le_zsu, 5239 gen_helper_sve_ldss_le_zsu, } }, 5240 { { gen_helper_sve_ldbss_zss, 5241 gen_helper_sve_ldhss_le_zss, 5242 NULL, }, 5243 { gen_helper_sve_ldbsu_zss, 5244 gen_helper_sve_ldhsu_le_zss, 5245 gen_helper_sve_ldss_le_zss, } } }, 5246 5247 /* First-fault */ 5248 { { { gen_helper_sve_ldffbss_zsu, 5249 gen_helper_sve_ldffhss_le_zsu, 5250 NULL, }, 5251 { gen_helper_sve_ldffbsu_zsu, 5252 gen_helper_sve_ldffhsu_le_zsu, 5253 gen_helper_sve_ldffss_le_zsu, } }, 5254 { { gen_helper_sve_ldffbss_zss, 5255 gen_helper_sve_ldffhss_le_zss, 5256 NULL, }, 5257 { gen_helper_sve_ldffbsu_zss, 5258 gen_helper_sve_ldffhsu_le_zss, 5259 gen_helper_sve_ldffss_le_zss, } } } }, 5260 5261 { /* Big-endian */ 5262 { { { gen_helper_sve_ldbss_zsu, 5263 gen_helper_sve_ldhss_be_zsu, 5264 NULL, }, 5265 { gen_helper_sve_ldbsu_zsu, 5266 gen_helper_sve_ldhsu_be_zsu, 5267 gen_helper_sve_ldss_be_zsu, } }, 5268 { { gen_helper_sve_ldbss_zss, 5269 gen_helper_sve_ldhss_be_zss, 5270 NULL, }, 5271 { gen_helper_sve_ldbsu_zss, 5272 gen_helper_sve_ldhsu_be_zss, 5273 gen_helper_sve_ldss_be_zss, } } }, 5274 5275 /* First-fault */ 5276 { { { gen_helper_sve_ldffbss_zsu, 5277 gen_helper_sve_ldffhss_be_zsu, 5278 NULL, }, 5279 { gen_helper_sve_ldffbsu_zsu, 5280 gen_helper_sve_ldffhsu_be_zsu, 5281 gen_helper_sve_ldffss_be_zsu, } }, 5282 { { gen_helper_sve_ldffbss_zss, 5283 gen_helper_sve_ldffhss_be_zss, 5284 NULL, }, 5285 { gen_helper_sve_ldffbsu_zss, 5286 gen_helper_sve_ldffhsu_be_zss, 5287 gen_helper_sve_ldffss_be_zss, } } } } }, 5288 { /* MTE Active */ 5289 { /* Little-endian */ 5290 { { { gen_helper_sve_ldbss_zsu_mte, 5291 gen_helper_sve_ldhss_le_zsu_mte, 5292 NULL, }, 5293 { gen_helper_sve_ldbsu_zsu_mte, 5294 gen_helper_sve_ldhsu_le_zsu_mte, 5295 gen_helper_sve_ldss_le_zsu_mte, } }, 5296 { { gen_helper_sve_ldbss_zss_mte, 5297 gen_helper_sve_ldhss_le_zss_mte, 5298 NULL, }, 5299 { gen_helper_sve_ldbsu_zss_mte, 5300 gen_helper_sve_ldhsu_le_zss_mte, 5301 gen_helper_sve_ldss_le_zss_mte, } } }, 5302 5303 /* First-fault */ 5304 { { { gen_helper_sve_ldffbss_zsu_mte, 5305 gen_helper_sve_ldffhss_le_zsu_mte, 5306 NULL, }, 5307 { gen_helper_sve_ldffbsu_zsu_mte, 5308 gen_helper_sve_ldffhsu_le_zsu_mte, 5309 gen_helper_sve_ldffss_le_zsu_mte, } }, 5310 { { gen_helper_sve_ldffbss_zss_mte, 5311 gen_helper_sve_ldffhss_le_zss_mte, 5312 NULL, }, 5313 { gen_helper_sve_ldffbsu_zss_mte, 5314 gen_helper_sve_ldffhsu_le_zss_mte, 5315 gen_helper_sve_ldffss_le_zss_mte, } } } }, 5316 5317 { /* Big-endian */ 5318 { { { gen_helper_sve_ldbss_zsu_mte, 5319 gen_helper_sve_ldhss_be_zsu_mte, 5320 NULL, }, 5321 { gen_helper_sve_ldbsu_zsu_mte, 5322 gen_helper_sve_ldhsu_be_zsu_mte, 5323 gen_helper_sve_ldss_be_zsu_mte, } }, 5324 { { gen_helper_sve_ldbss_zss_mte, 5325 gen_helper_sve_ldhss_be_zss_mte, 5326 NULL, }, 5327 { gen_helper_sve_ldbsu_zss_mte, 5328 gen_helper_sve_ldhsu_be_zss_mte, 5329 gen_helper_sve_ldss_be_zss_mte, } } }, 5330 5331 /* First-fault */ 5332 { { { gen_helper_sve_ldffbss_zsu_mte, 5333 gen_helper_sve_ldffhss_be_zsu_mte, 5334 NULL, }, 5335 { gen_helper_sve_ldffbsu_zsu_mte, 5336 gen_helper_sve_ldffhsu_be_zsu_mte, 5337 gen_helper_sve_ldffss_be_zsu_mte, } }, 5338 { { gen_helper_sve_ldffbss_zss_mte, 5339 gen_helper_sve_ldffhss_be_zss_mte, 5340 NULL, }, 5341 { 
gen_helper_sve_ldffbsu_zss_mte, 5342 gen_helper_sve_ldffhsu_be_zss_mte, 5343 gen_helper_sve_ldffss_be_zss_mte, } } } } }, 5344 }; 5345 5346 /* Note that we overload xs=2 to indicate 64-bit offset. */ 5347 static gen_helper_gvec_mem_scatter * const 5348 gather_load_fn64[2][2][2][3][2][4] = { 5349 { /* MTE Inactive */ 5350 { /* Little-endian */ 5351 { { { gen_helper_sve_ldbds_zsu, 5352 gen_helper_sve_ldhds_le_zsu, 5353 gen_helper_sve_ldsds_le_zsu, 5354 NULL, }, 5355 { gen_helper_sve_ldbdu_zsu, 5356 gen_helper_sve_ldhdu_le_zsu, 5357 gen_helper_sve_ldsdu_le_zsu, 5358 gen_helper_sve_lddd_le_zsu, } }, 5359 { { gen_helper_sve_ldbds_zss, 5360 gen_helper_sve_ldhds_le_zss, 5361 gen_helper_sve_ldsds_le_zss, 5362 NULL, }, 5363 { gen_helper_sve_ldbdu_zss, 5364 gen_helper_sve_ldhdu_le_zss, 5365 gen_helper_sve_ldsdu_le_zss, 5366 gen_helper_sve_lddd_le_zss, } }, 5367 { { gen_helper_sve_ldbds_zd, 5368 gen_helper_sve_ldhds_le_zd, 5369 gen_helper_sve_ldsds_le_zd, 5370 NULL, }, 5371 { gen_helper_sve_ldbdu_zd, 5372 gen_helper_sve_ldhdu_le_zd, 5373 gen_helper_sve_ldsdu_le_zd, 5374 gen_helper_sve_lddd_le_zd, } } }, 5375 5376 /* First-fault */ 5377 { { { gen_helper_sve_ldffbds_zsu, 5378 gen_helper_sve_ldffhds_le_zsu, 5379 gen_helper_sve_ldffsds_le_zsu, 5380 NULL, }, 5381 { gen_helper_sve_ldffbdu_zsu, 5382 gen_helper_sve_ldffhdu_le_zsu, 5383 gen_helper_sve_ldffsdu_le_zsu, 5384 gen_helper_sve_ldffdd_le_zsu, } }, 5385 { { gen_helper_sve_ldffbds_zss, 5386 gen_helper_sve_ldffhds_le_zss, 5387 gen_helper_sve_ldffsds_le_zss, 5388 NULL, }, 5389 { gen_helper_sve_ldffbdu_zss, 5390 gen_helper_sve_ldffhdu_le_zss, 5391 gen_helper_sve_ldffsdu_le_zss, 5392 gen_helper_sve_ldffdd_le_zss, } }, 5393 { { gen_helper_sve_ldffbds_zd, 5394 gen_helper_sve_ldffhds_le_zd, 5395 gen_helper_sve_ldffsds_le_zd, 5396 NULL, }, 5397 { gen_helper_sve_ldffbdu_zd, 5398 gen_helper_sve_ldffhdu_le_zd, 5399 gen_helper_sve_ldffsdu_le_zd, 5400 gen_helper_sve_ldffdd_le_zd, } } } }, 5401 { /* Big-endian */ 5402 { { { gen_helper_sve_ldbds_zsu, 5403 gen_helper_sve_ldhds_be_zsu, 5404 gen_helper_sve_ldsds_be_zsu, 5405 NULL, }, 5406 { gen_helper_sve_ldbdu_zsu, 5407 gen_helper_sve_ldhdu_be_zsu, 5408 gen_helper_sve_ldsdu_be_zsu, 5409 gen_helper_sve_lddd_be_zsu, } }, 5410 { { gen_helper_sve_ldbds_zss, 5411 gen_helper_sve_ldhds_be_zss, 5412 gen_helper_sve_ldsds_be_zss, 5413 NULL, }, 5414 { gen_helper_sve_ldbdu_zss, 5415 gen_helper_sve_ldhdu_be_zss, 5416 gen_helper_sve_ldsdu_be_zss, 5417 gen_helper_sve_lddd_be_zss, } }, 5418 { { gen_helper_sve_ldbds_zd, 5419 gen_helper_sve_ldhds_be_zd, 5420 gen_helper_sve_ldsds_be_zd, 5421 NULL, }, 5422 { gen_helper_sve_ldbdu_zd, 5423 gen_helper_sve_ldhdu_be_zd, 5424 gen_helper_sve_ldsdu_be_zd, 5425 gen_helper_sve_lddd_be_zd, } } }, 5426 5427 /* First-fault */ 5428 { { { gen_helper_sve_ldffbds_zsu, 5429 gen_helper_sve_ldffhds_be_zsu, 5430 gen_helper_sve_ldffsds_be_zsu, 5431 NULL, }, 5432 { gen_helper_sve_ldffbdu_zsu, 5433 gen_helper_sve_ldffhdu_be_zsu, 5434 gen_helper_sve_ldffsdu_be_zsu, 5435 gen_helper_sve_ldffdd_be_zsu, } }, 5436 { { gen_helper_sve_ldffbds_zss, 5437 gen_helper_sve_ldffhds_be_zss, 5438 gen_helper_sve_ldffsds_be_zss, 5439 NULL, }, 5440 { gen_helper_sve_ldffbdu_zss, 5441 gen_helper_sve_ldffhdu_be_zss, 5442 gen_helper_sve_ldffsdu_be_zss, 5443 gen_helper_sve_ldffdd_be_zss, } }, 5444 { { gen_helper_sve_ldffbds_zd, 5445 gen_helper_sve_ldffhds_be_zd, 5446 gen_helper_sve_ldffsds_be_zd, 5447 NULL, }, 5448 { gen_helper_sve_ldffbdu_zd, 5449 gen_helper_sve_ldffhdu_be_zd, 5450 gen_helper_sve_ldffsdu_be_zd, 5451 
gen_helper_sve_ldffdd_be_zd, } } } } }, 5452 { /* MTE Active */ 5453 { /* Little-endian */ 5454 { { { gen_helper_sve_ldbds_zsu_mte, 5455 gen_helper_sve_ldhds_le_zsu_mte, 5456 gen_helper_sve_ldsds_le_zsu_mte, 5457 NULL, }, 5458 { gen_helper_sve_ldbdu_zsu_mte, 5459 gen_helper_sve_ldhdu_le_zsu_mte, 5460 gen_helper_sve_ldsdu_le_zsu_mte, 5461 gen_helper_sve_lddd_le_zsu_mte, } }, 5462 { { gen_helper_sve_ldbds_zss_mte, 5463 gen_helper_sve_ldhds_le_zss_mte, 5464 gen_helper_sve_ldsds_le_zss_mte, 5465 NULL, }, 5466 { gen_helper_sve_ldbdu_zss_mte, 5467 gen_helper_sve_ldhdu_le_zss_mte, 5468 gen_helper_sve_ldsdu_le_zss_mte, 5469 gen_helper_sve_lddd_le_zss_mte, } }, 5470 { { gen_helper_sve_ldbds_zd_mte, 5471 gen_helper_sve_ldhds_le_zd_mte, 5472 gen_helper_sve_ldsds_le_zd_mte, 5473 NULL, }, 5474 { gen_helper_sve_ldbdu_zd_mte, 5475 gen_helper_sve_ldhdu_le_zd_mte, 5476 gen_helper_sve_ldsdu_le_zd_mte, 5477 gen_helper_sve_lddd_le_zd_mte, } } }, 5478 5479 /* First-fault */ 5480 { { { gen_helper_sve_ldffbds_zsu_mte, 5481 gen_helper_sve_ldffhds_le_zsu_mte, 5482 gen_helper_sve_ldffsds_le_zsu_mte, 5483 NULL, }, 5484 { gen_helper_sve_ldffbdu_zsu_mte, 5485 gen_helper_sve_ldffhdu_le_zsu_mte, 5486 gen_helper_sve_ldffsdu_le_zsu_mte, 5487 gen_helper_sve_ldffdd_le_zsu_mte, } }, 5488 { { gen_helper_sve_ldffbds_zss_mte, 5489 gen_helper_sve_ldffhds_le_zss_mte, 5490 gen_helper_sve_ldffsds_le_zss_mte, 5491 NULL, }, 5492 { gen_helper_sve_ldffbdu_zss_mte, 5493 gen_helper_sve_ldffhdu_le_zss_mte, 5494 gen_helper_sve_ldffsdu_le_zss_mte, 5495 gen_helper_sve_ldffdd_le_zss_mte, } }, 5496 { { gen_helper_sve_ldffbds_zd_mte, 5497 gen_helper_sve_ldffhds_le_zd_mte, 5498 gen_helper_sve_ldffsds_le_zd_mte, 5499 NULL, }, 5500 { gen_helper_sve_ldffbdu_zd_mte, 5501 gen_helper_sve_ldffhdu_le_zd_mte, 5502 gen_helper_sve_ldffsdu_le_zd_mte, 5503 gen_helper_sve_ldffdd_le_zd_mte, } } } }, 5504 { /* Big-endian */ 5505 { { { gen_helper_sve_ldbds_zsu_mte, 5506 gen_helper_sve_ldhds_be_zsu_mte, 5507 gen_helper_sve_ldsds_be_zsu_mte, 5508 NULL, }, 5509 { gen_helper_sve_ldbdu_zsu_mte, 5510 gen_helper_sve_ldhdu_be_zsu_mte, 5511 gen_helper_sve_ldsdu_be_zsu_mte, 5512 gen_helper_sve_lddd_be_zsu_mte, } }, 5513 { { gen_helper_sve_ldbds_zss_mte, 5514 gen_helper_sve_ldhds_be_zss_mte, 5515 gen_helper_sve_ldsds_be_zss_mte, 5516 NULL, }, 5517 { gen_helper_sve_ldbdu_zss_mte, 5518 gen_helper_sve_ldhdu_be_zss_mte, 5519 gen_helper_sve_ldsdu_be_zss_mte, 5520 gen_helper_sve_lddd_be_zss_mte, } }, 5521 { { gen_helper_sve_ldbds_zd_mte, 5522 gen_helper_sve_ldhds_be_zd_mte, 5523 gen_helper_sve_ldsds_be_zd_mte, 5524 NULL, }, 5525 { gen_helper_sve_ldbdu_zd_mte, 5526 gen_helper_sve_ldhdu_be_zd_mte, 5527 gen_helper_sve_ldsdu_be_zd_mte, 5528 gen_helper_sve_lddd_be_zd_mte, } } }, 5529 5530 /* First-fault */ 5531 { { { gen_helper_sve_ldffbds_zsu_mte, 5532 gen_helper_sve_ldffhds_be_zsu_mte, 5533 gen_helper_sve_ldffsds_be_zsu_mte, 5534 NULL, }, 5535 { gen_helper_sve_ldffbdu_zsu_mte, 5536 gen_helper_sve_ldffhdu_be_zsu_mte, 5537 gen_helper_sve_ldffsdu_be_zsu_mte, 5538 gen_helper_sve_ldffdd_be_zsu_mte, } }, 5539 { { gen_helper_sve_ldffbds_zss_mte, 5540 gen_helper_sve_ldffhds_be_zss_mte, 5541 gen_helper_sve_ldffsds_be_zss_mte, 5542 NULL, }, 5543 { gen_helper_sve_ldffbdu_zss_mte, 5544 gen_helper_sve_ldffhdu_be_zss_mte, 5545 gen_helper_sve_ldffsdu_be_zss_mte, 5546 gen_helper_sve_ldffdd_be_zss_mte, } }, 5547 { { gen_helper_sve_ldffbds_zd_mte, 5548 gen_helper_sve_ldffhds_be_zd_mte, 5549 gen_helper_sve_ldffsds_be_zd_mte, 5550 NULL, }, 5551 { gen_helper_sve_ldffbdu_zd_mte, 5552 
gen_helper_sve_ldffhdu_be_zd_mte, 5553 gen_helper_sve_ldffsdu_be_zd_mte, 5554 gen_helper_sve_ldffdd_be_zd_mte, } } } } }, 5555 }; 5556 5557 static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) 5558 { 5559 gen_helper_gvec_mem_scatter *fn = NULL; 5560 bool be = s->be_data == MO_BE; 5561 bool mte = s->mte_active[0]; 5562 5563 if (!dc_isar_feature(aa64_sve, s)) { 5564 return false; 5565 } 5566 s->is_nonstreaming = true; 5567 if (!sve_access_check(s)) { 5568 return true; 5569 } 5570 5571 switch (a->esz) { 5572 case MO_32: 5573 fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz]; 5574 break; 5575 case MO_64: 5576 fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz]; 5577 break; 5578 } 5579 assert(fn != NULL); 5580 5581 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, 5582 cpu_reg_sp(s, a->rn), a->msz, false, fn); 5583 return true; 5584 } 5585 5586 static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) 5587 { 5588 gen_helper_gvec_mem_scatter *fn = NULL; 5589 bool be = s->be_data == MO_BE; 5590 bool mte = s->mte_active[0]; 5591 5592 if (a->esz < a->msz || (a->esz == a->msz && !a->u)) { 5593 return false; 5594 } 5595 if (!dc_isar_feature(aa64_sve, s)) { 5596 return false; 5597 } 5598 s->is_nonstreaming = true; 5599 if (!sve_access_check(s)) { 5600 return true; 5601 } 5602 5603 switch (a->esz) { 5604 case MO_32: 5605 fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz]; 5606 break; 5607 case MO_64: 5608 fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz]; 5609 break; 5610 } 5611 assert(fn != NULL); 5612 5613 /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x]) 5614 * by loading the immediate into the scalar parameter. 5615 */ 5616 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5617 tcg_constant_i64(a->imm << a->msz), a->msz, false, fn); 5618 return true; 5619 } 5620 5621 static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a) 5622 { 5623 gen_helper_gvec_mem_scatter *fn = NULL; 5624 bool be = s->be_data == MO_BE; 5625 bool mte = s->mte_active[0]; 5626 5627 if (a->esz < a->msz + !a->u) { 5628 return false; 5629 } 5630 if (!dc_isar_feature(aa64_sve2, s)) { 5631 return false; 5632 } 5633 s->is_nonstreaming = true; 5634 if (!sve_access_check(s)) { 5635 return true; 5636 } 5637 5638 switch (a->esz) { 5639 case MO_32: 5640 fn = gather_load_fn32[mte][be][0][0][a->u][a->msz]; 5641 break; 5642 case MO_64: 5643 fn = gather_load_fn64[mte][be][0][2][a->u][a->msz]; 5644 break; 5645 } 5646 assert(fn != NULL); 5647 5648 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5649 cpu_reg(s, a->rm), a->msz, false, fn); 5650 return true; 5651 } 5652 5653 /* Indexed by [mte][be][xs][msz]. 
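 * For instance, scatter_store_fn32[0][0][0][MO_16] below is
 * gen_helper_sve_sths_le_zsu: MTE inactive, little-endian, unsigned (zsu)
 * 32-bit offsets, 16-bit memory elements.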
 */
static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { gen_helper_sve_stbs_zsu,
              gen_helper_sve_sths_le_zsu,
              gen_helper_sve_stss_le_zsu, },
            { gen_helper_sve_stbs_zss,
              gen_helper_sve_sths_le_zss,
              gen_helper_sve_stss_le_zss, } },
        { /* Big-endian */
            { gen_helper_sve_stbs_zsu,
              gen_helper_sve_sths_be_zsu,
              gen_helper_sve_stss_be_zsu, },
            { gen_helper_sve_stbs_zss,
              gen_helper_sve_sths_be_zss,
              gen_helper_sve_stss_be_zss, } } },
    { /* MTE Active */
        { /* Little-endian */
            { gen_helper_sve_stbs_zsu_mte,
              gen_helper_sve_sths_le_zsu_mte,
              gen_helper_sve_stss_le_zsu_mte, },
            { gen_helper_sve_stbs_zss_mte,
              gen_helper_sve_sths_le_zss_mte,
              gen_helper_sve_stss_le_zss_mte, } },
        { /* Big-endian */
            { gen_helper_sve_stbs_zsu_mte,
              gen_helper_sve_sths_be_zsu_mte,
              gen_helper_sve_stss_be_zsu_mte, },
            { gen_helper_sve_stbs_zss_mte,
              gen_helper_sve_sths_be_zss_mte,
              gen_helper_sve_stss_be_zss_mte, } } },
};

/* Note that we overload xs=2 to indicate 64-bit offset. */
static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { gen_helper_sve_stbd_zsu,
              gen_helper_sve_sthd_le_zsu,
              gen_helper_sve_stsd_le_zsu,
              gen_helper_sve_stdd_le_zsu, },
            { gen_helper_sve_stbd_zss,
              gen_helper_sve_sthd_le_zss,
              gen_helper_sve_stsd_le_zss,
              gen_helper_sve_stdd_le_zss, },
            { gen_helper_sve_stbd_zd,
              gen_helper_sve_sthd_le_zd,
              gen_helper_sve_stsd_le_zd,
              gen_helper_sve_stdd_le_zd, } },
        { /* Big-endian */
            { gen_helper_sve_stbd_zsu,
              gen_helper_sve_sthd_be_zsu,
              gen_helper_sve_stsd_be_zsu,
              gen_helper_sve_stdd_be_zsu, },
            { gen_helper_sve_stbd_zss,
              gen_helper_sve_sthd_be_zss,
              gen_helper_sve_stsd_be_zss,
              gen_helper_sve_stdd_be_zss, },
            { gen_helper_sve_stbd_zd,
              gen_helper_sve_sthd_be_zd,
              gen_helper_sve_stsd_be_zd,
              gen_helper_sve_stdd_be_zd, } } },
    { /* MTE Active */
        { /* Little-endian */
            { gen_helper_sve_stbd_zsu_mte,
              gen_helper_sve_sthd_le_zsu_mte,
              gen_helper_sve_stsd_le_zsu_mte,
              gen_helper_sve_stdd_le_zsu_mte, },
            { gen_helper_sve_stbd_zss_mte,
              gen_helper_sve_sthd_le_zss_mte,
              gen_helper_sve_stsd_le_zss_mte,
              gen_helper_sve_stdd_le_zss_mte, },
            { gen_helper_sve_stbd_zd_mte,
              gen_helper_sve_sthd_le_zd_mte,
              gen_helper_sve_stsd_le_zd_mte,
              gen_helper_sve_stdd_le_zd_mte, } },
        { /* Big-endian */
            { gen_helper_sve_stbd_zsu_mte,
              gen_helper_sve_sthd_be_zsu_mte,
              gen_helper_sve_stsd_be_zsu_mte,
              gen_helper_sve_stdd_be_zsu_mte, },
            { gen_helper_sve_stbd_zss_mte,
              gen_helper_sve_sthd_be_zss_mte,
              gen_helper_sve_stsd_be_zss_mte,
              gen_helper_sve_stdd_be_zss_mte, },
            { gen_helper_sve_stbd_zd_mte,
              gen_helper_sve_sthd_be_zd_mte,
              gen_helper_sve_stsd_be_zd_mte,
              gen_helper_sve_stdd_be_zd_mte, } } },
};

static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    s->is_nonstreaming = true;
    if (!sve_access_check(s)) {
        return true;
5760 } 5761 switch (a->esz) { 5762 case MO_32: 5763 fn = scatter_store_fn32[mte][be][a->xs][a->msz]; 5764 break; 5765 case MO_64: 5766 fn = scatter_store_fn64[mte][be][a->xs][a->msz]; 5767 break; 5768 default: 5769 g_assert_not_reached(); 5770 } 5771 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz, 5772 cpu_reg_sp(s, a->rn), a->msz, true, fn); 5773 return true; 5774 } 5775 5776 static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) 5777 { 5778 gen_helper_gvec_mem_scatter *fn = NULL; 5779 bool be = s->be_data == MO_BE; 5780 bool mte = s->mte_active[0]; 5781 5782 if (a->esz < a->msz) { 5783 return false; 5784 } 5785 if (!dc_isar_feature(aa64_sve, s)) { 5786 return false; 5787 } 5788 s->is_nonstreaming = true; 5789 if (!sve_access_check(s)) { 5790 return true; 5791 } 5792 5793 switch (a->esz) { 5794 case MO_32: 5795 fn = scatter_store_fn32[mte][be][0][a->msz]; 5796 break; 5797 case MO_64: 5798 fn = scatter_store_fn64[mte][be][2][a->msz]; 5799 break; 5800 } 5801 assert(fn != NULL); 5802 5803 /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x]) 5804 * by loading the immediate into the scalar parameter. 5805 */ 5806 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5807 tcg_constant_i64(a->imm << a->msz), a->msz, true, fn); 5808 return true; 5809 } 5810 5811 static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) 5812 { 5813 gen_helper_gvec_mem_scatter *fn; 5814 bool be = s->be_data == MO_BE; 5815 bool mte = s->mte_active[0]; 5816 5817 if (a->esz < a->msz) { 5818 return false; 5819 } 5820 if (!dc_isar_feature(aa64_sve2, s)) { 5821 return false; 5822 } 5823 s->is_nonstreaming = true; 5824 if (!sve_access_check(s)) { 5825 return true; 5826 } 5827 5828 switch (a->esz) { 5829 case MO_32: 5830 fn = scatter_store_fn32[mte][be][0][a->msz]; 5831 break; 5832 case MO_64: 5833 fn = scatter_store_fn64[mte][be][2][a->msz]; 5834 break; 5835 default: 5836 g_assert_not_reached(); 5837 } 5838 5839 do_mem_zpz(s, a->rd, a->pg, a->rn, 0, 5840 cpu_reg(s, a->rm), a->msz, true, fn); 5841 return true; 5842 } 5843 5844 /* 5845 * Prefetches 5846 */ 5847 5848 static bool trans_PRF(DisasContext *s, arg_PRF *a) 5849 { 5850 if (!dc_isar_feature(aa64_sve, s)) { 5851 return false; 5852 } 5853 /* Prefetch is a nop within QEMU. */ 5854 (void)sve_access_check(s); 5855 return true; 5856 } 5857 5858 static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) 5859 { 5860 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) { 5861 return false; 5862 } 5863 /* Prefetch is a nop within QEMU. */ 5864 (void)sve_access_check(s); 5865 return true; 5866 } 5867 5868 static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a) 5869 { 5870 if (!dc_isar_feature(aa64_sve, s)) { 5871 return false; 5872 } 5873 /* Prefetch is a nop within QEMU. */ 5874 s->is_nonstreaming = true; 5875 (void)sve_access_check(s); 5876 return true; 5877 } 5878 5879 /* 5880 * Move Prefix 5881 * 5882 * TODO: The implementation so far could handle predicated merging movprfx. 5883 * The helper functions as written take an extra source register to 5884 * use in the operation, but the result is only written when predication 5885 * succeeds. For unpredicated movprfx, we need to rearrange the helpers 5886 * to allow the final write back to the destination to be unconditional. 5887 * For predicated zeroing movprfx, we need to rearrange the helpers to 5888 * allow the final write back to zero inactives. 5889 * 5890 * In the meantime, just emit the moves. 
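 *
 * For instance, a constructive sequence such as
 *     MOVPRFX  Z0, Z1
 *     ADD      Z0.D, P0/M, Z0.D, Z2.D
 * is currently translated as a plain vector move of Z1 into Z0 followed
 * by the ordinary predicated ADD, rather than being fused into a single
 * predicated operation.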
5891 */ 5892 5893 TRANS_FEAT(MOVPRFX, aa64_sve, do_mov_z, a->rd, a->rn) 5894 TRANS_FEAT(MOVPRFX_m, aa64_sve, do_sel_z, a->rd, a->rn, a->rd, a->pg, a->esz) 5895 TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false) 5896 5897 /* 5898 * SVE2 Integer Multiply - Unpredicated 5899 */ 5900 5901 TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a) 5902 5903 static gen_helper_gvec_3 * const smulh_zzz_fns[4] = { 5904 gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h, 5905 gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d, 5906 }; 5907 TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5908 smulh_zzz_fns[a->esz], a, 0) 5909 5910 static gen_helper_gvec_3 * const umulh_zzz_fns[4] = { 5911 gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h, 5912 gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d, 5913 }; 5914 TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5915 umulh_zzz_fns[a->esz], a, 0) 5916 5917 TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5918 gen_helper_gvec_pmul_b, a, 0) 5919 5920 static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = { 5921 gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h, 5922 gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d, 5923 }; 5924 TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5925 sqdmulh_zzz_fns[a->esz], a, 0) 5926 5927 static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = { 5928 gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h, 5929 gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d, 5930 }; 5931 TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 5932 sqrdmulh_zzz_fns[a->esz], a, 0) 5933 5934 /* 5935 * SVE2 Integer - Predicated 5936 */ 5937 5938 static gen_helper_gvec_4 * const sadlp_fns[4] = { 5939 NULL, gen_helper_sve2_sadalp_zpzz_h, 5940 gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d, 5941 }; 5942 TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, 5943 sadlp_fns[a->esz], a, 0) 5944 5945 static gen_helper_gvec_4 * const uadlp_fns[4] = { 5946 NULL, gen_helper_sve2_uadalp_zpzz_h, 5947 gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d, 5948 }; 5949 TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz, 5950 uadlp_fns[a->esz], a, 0) 5951 5952 /* 5953 * SVE2 integer unary operations (predicated) 5954 */ 5955 5956 TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz, 5957 a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0) 5958 5959 TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz, 5960 a->esz == 2 ? 
gen_helper_sve2_ursqrte_s : NULL, a, 0) 5961 5962 static gen_helper_gvec_3 * const sqabs_fns[4] = { 5963 gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h, 5964 gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d, 5965 }; 5966 TRANS_FEAT(SQABS, aa64_sve2, gen_gvec_ool_arg_zpz, sqabs_fns[a->esz], a, 0) 5967 5968 static gen_helper_gvec_3 * const sqneg_fns[4] = { 5969 gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h, 5970 gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d, 5971 }; 5972 TRANS_FEAT(SQNEG, aa64_sve2, gen_gvec_ool_arg_zpz, sqneg_fns[a->esz], a, 0) 5973 5974 DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl) 5975 DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl) 5976 DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl) 5977 5978 DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl) 5979 DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl) 5980 DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl) 5981 5982 DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd) 5983 DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd) 5984 DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub) 5985 5986 DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd) 5987 DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd) 5988 DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub) 5989 5990 DO_ZPZZ(ADDP, aa64_sve2, sve2_addp) 5991 DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp) 5992 DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp) 5993 DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp) 5994 DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp) 5995 5996 DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd) 5997 DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd) 5998 DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub) 5999 DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub) 6000 DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd) 6001 DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd) 6002 6003 /* 6004 * SVE2 Widening Integer Arithmetic 6005 */ 6006 6007 static gen_helper_gvec_3 * const saddl_fns[4] = { 6008 NULL, gen_helper_sve2_saddl_h, 6009 gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d, 6010 }; 6011 TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 6012 saddl_fns[a->esz], a, 0) 6013 TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 6014 saddl_fns[a->esz], a, 3) 6015 TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz, 6016 saddl_fns[a->esz], a, 2) 6017 6018 static gen_helper_gvec_3 * const ssubl_fns[4] = { 6019 NULL, gen_helper_sve2_ssubl_h, 6020 gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d, 6021 }; 6022 TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz, 6023 ssubl_fns[a->esz], a, 0) 6024 TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz, 6025 ssubl_fns[a->esz], a, 3) 6026 TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz, 6027 ssubl_fns[a->esz], a, 2) 6028 TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz, 6029 ssubl_fns[a->esz], a, 1) 6030 6031 static gen_helper_gvec_3 * const sabdl_fns[4] = { 6032 NULL, gen_helper_sve2_sabdl_h, 6033 gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d, 6034 }; 6035 TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 6036 sabdl_fns[a->esz], a, 0) 6037 TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 6038 sabdl_fns[a->esz], a, 3) 6039 6040 static gen_helper_gvec_3 * const uaddl_fns[4] = { 6041 NULL, gen_helper_sve2_uaddl_h, 6042 gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d, 6043 }; 6044 TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 6045 uaddl_fns[a->esz], a, 0) 6046 TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 6047 uaddl_fns[a->esz], a, 3) 6048 6049 static gen_helper_gvec_3 * const usubl_fns[4] = { 6050 NULL, gen_helper_sve2_usubl_h, 6051 gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d, 6052 }; 6053 TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz, 6054 usubl_fns[a->esz], a, 0) 6055 TRANS_FEAT(USUBLT, aa64_sve2, 
gen_gvec_ool_arg_zzz, 6056 usubl_fns[a->esz], a, 3) 6057 6058 static gen_helper_gvec_3 * const uabdl_fns[4] = { 6059 NULL, gen_helper_sve2_uabdl_h, 6060 gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d, 6061 }; 6062 TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz, 6063 uabdl_fns[a->esz], a, 0) 6064 TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz, 6065 uabdl_fns[a->esz], a, 3) 6066 6067 static gen_helper_gvec_3 * const sqdmull_fns[4] = { 6068 NULL, gen_helper_sve2_sqdmull_zzz_h, 6069 gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d, 6070 }; 6071 TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6072 sqdmull_fns[a->esz], a, 0) 6073 TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6074 sqdmull_fns[a->esz], a, 3) 6075 6076 static gen_helper_gvec_3 * const smull_fns[4] = { 6077 NULL, gen_helper_sve2_smull_zzz_h, 6078 gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d, 6079 }; 6080 TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6081 smull_fns[a->esz], a, 0) 6082 TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6083 smull_fns[a->esz], a, 3) 6084 6085 static gen_helper_gvec_3 * const umull_fns[4] = { 6086 NULL, gen_helper_sve2_umull_zzz_h, 6087 gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d, 6088 }; 6089 TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6090 umull_fns[a->esz], a, 0) 6091 TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz, 6092 umull_fns[a->esz], a, 3) 6093 6094 static gen_helper_gvec_3 * const eoril_fns[4] = { 6095 gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h, 6096 gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d, 6097 }; 6098 TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2) 6099 TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1) 6100 6101 static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) 6102 { 6103 static gen_helper_gvec_3 * const fns[4] = { 6104 gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h, 6105 NULL, gen_helper_sve2_pmull_d, 6106 }; 6107 6108 if (a->esz == 0) { 6109 if (!dc_isar_feature(aa64_sve2_pmull128, s)) { 6110 return false; 6111 } 6112 s->is_nonstreaming = true; 6113 } else if (!dc_isar_feature(aa64_sve, s)) { 6114 return false; 6115 } 6116 return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel); 6117 } 6118 6119 TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false) 6120 TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true) 6121 6122 static gen_helper_gvec_3 * const saddw_fns[4] = { 6123 NULL, gen_helper_sve2_saddw_h, 6124 gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d, 6125 }; 6126 TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0) 6127 TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1) 6128 6129 static gen_helper_gvec_3 * const ssubw_fns[4] = { 6130 NULL, gen_helper_sve2_ssubw_h, 6131 gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d, 6132 }; 6133 TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0) 6134 TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1) 6135 6136 static gen_helper_gvec_3 * const uaddw_fns[4] = { 6137 NULL, gen_helper_sve2_uaddw_h, 6138 gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d, 6139 }; 6140 TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0) 6141 TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1) 6142 6143 static gen_helper_gvec_3 * const usubw_fns[4] = { 6144 NULL, gen_helper_sve2_usubw_h, 6145 gen_helper_sve2_usubw_s, 
gen_helper_sve2_usubw_d, 6146 }; 6147 TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0) 6148 TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1) 6149 6150 static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) 6151 { 6152 int top = imm & 1; 6153 int shl = imm >> 1; 6154 int halfbits = 4 << vece; 6155 6156 if (top) { 6157 if (shl == halfbits) { 6158 tcg_gen_and_vec(vece, d, n, 6159 tcg_constant_vec_matching(d, vece, 6160 MAKE_64BIT_MASK(halfbits, halfbits))); 6161 } else { 6162 tcg_gen_sari_vec(vece, d, n, halfbits); 6163 tcg_gen_shli_vec(vece, d, d, shl); 6164 } 6165 } else { 6166 tcg_gen_shli_vec(vece, d, n, halfbits); 6167 tcg_gen_sari_vec(vece, d, d, halfbits - shl); 6168 } 6169 } 6170 6171 static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm) 6172 { 6173 int halfbits = 4 << vece; 6174 int top = imm & 1; 6175 int shl = (imm >> 1); 6176 int shift; 6177 uint64_t mask; 6178 6179 mask = MAKE_64BIT_MASK(0, halfbits); 6180 mask <<= shl; 6181 mask = dup_const(vece, mask); 6182 6183 shift = shl - top * halfbits; 6184 if (shift < 0) { 6185 tcg_gen_shri_i64(d, n, -shift); 6186 } else { 6187 tcg_gen_shli_i64(d, n, shift); 6188 } 6189 tcg_gen_andi_i64(d, d, mask); 6190 } 6191 6192 static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) 6193 { 6194 gen_ushll_i64(MO_16, d, n, imm); 6195 } 6196 6197 static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) 6198 { 6199 gen_ushll_i64(MO_32, d, n, imm); 6200 } 6201 6202 static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm) 6203 { 6204 gen_ushll_i64(MO_64, d, n, imm); 6205 } 6206 6207 static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm) 6208 { 6209 int halfbits = 4 << vece; 6210 int top = imm & 1; 6211 int shl = imm >> 1; 6212 6213 if (top) { 6214 if (shl == halfbits) { 6215 tcg_gen_and_vec(vece, d, n, 6216 tcg_constant_vec_matching(d, vece, 6217 MAKE_64BIT_MASK(halfbits, halfbits))); 6218 } else { 6219 tcg_gen_shri_vec(vece, d, n, halfbits); 6220 tcg_gen_shli_vec(vece, d, d, shl); 6221 } 6222 } else { 6223 if (shl == 0) { 6224 tcg_gen_and_vec(vece, d, n, 6225 tcg_constant_vec_matching(d, vece, 6226 MAKE_64BIT_MASK(0, halfbits))); 6227 } else { 6228 tcg_gen_shli_vec(vece, d, n, halfbits); 6229 tcg_gen_shri_vec(vece, d, d, halfbits - shl); 6230 } 6231 } 6232 } 6233 6234 static bool do_shll_tb(DisasContext *s, arg_rri_esz *a, 6235 const GVecGen2i ops[3], bool sel) 6236 { 6237 6238 if (a->esz < 0 || a->esz > 2) { 6239 return false; 6240 } 6241 if (sve_access_check(s)) { 6242 unsigned vsz = vec_full_reg_size(s); 6243 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd), 6244 vec_full_reg_offset(s, a->rn), 6245 vsz, vsz, (a->imm << 1) | sel, 6246 &ops[a->esz]); 6247 } 6248 return true; 6249 } 6250 6251 static const TCGOpcode sshll_list[] = { 6252 INDEX_op_shli_vec, INDEX_op_sari_vec, 0 6253 }; 6254 static const GVecGen2i sshll_ops[3] = { 6255 { .fniv = gen_sshll_vec, 6256 .opt_opc = sshll_list, 6257 .fno = gen_helper_sve2_sshll_h, 6258 .vece = MO_16 }, 6259 { .fniv = gen_sshll_vec, 6260 .opt_opc = sshll_list, 6261 .fno = gen_helper_sve2_sshll_s, 6262 .vece = MO_32 }, 6263 { .fniv = gen_sshll_vec, 6264 .opt_opc = sshll_list, 6265 .fno = gen_helper_sve2_sshll_d, 6266 .vece = MO_64 } 6267 }; 6268 TRANS_FEAT(SSHLLB, aa64_sve2, do_shll_tb, a, sshll_ops, false) 6269 TRANS_FEAT(SSHLLT, aa64_sve2, do_shll_tb, a, sshll_ops, true) 6270 6271 static const TCGOpcode ushll_list[] = { 6272 INDEX_op_shli_vec, INDEX_op_shri_vec, 0 6273 }; 
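/*
 * A note on the immediate encoding used by do_shll_tb() above: the gvec
 * immediate is (a->imm << 1) | sel, and the expanders recover
 * top = imm & 1 and shl = imm >> 1.  For example, USHLLT .S, .H with a
 * shift of 3 passes (3 << 1) | 1 = 7, so gen_ushll_vec() extracts the
 * high 16-bit halves and then shifts left by 3.
 */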
6274 static const GVecGen2i ushll_ops[3] = { 6275 { .fni8 = gen_ushll16_i64, 6276 .fniv = gen_ushll_vec, 6277 .opt_opc = ushll_list, 6278 .fno = gen_helper_sve2_ushll_h, 6279 .vece = MO_16 }, 6280 { .fni8 = gen_ushll32_i64, 6281 .fniv = gen_ushll_vec, 6282 .opt_opc = ushll_list, 6283 .fno = gen_helper_sve2_ushll_s, 6284 .vece = MO_32 }, 6285 { .fni8 = gen_ushll64_i64, 6286 .fniv = gen_ushll_vec, 6287 .opt_opc = ushll_list, 6288 .fno = gen_helper_sve2_ushll_d, 6289 .vece = MO_64 }, 6290 }; 6291 TRANS_FEAT(USHLLB, aa64_sve2, do_shll_tb, a, ushll_ops, false) 6292 TRANS_FEAT(USHLLT, aa64_sve2, do_shll_tb, a, ushll_ops, true) 6293 6294 static gen_helper_gvec_3 * const bext_fns[4] = { 6295 gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, 6296 gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, 6297 }; 6298 TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, 6299 bext_fns[a->esz], a, 0) 6300 6301 static gen_helper_gvec_3 * const bdep_fns[4] = { 6302 gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, 6303 gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, 6304 }; 6305 TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, 6306 bdep_fns[a->esz], a, 0) 6307 6308 static gen_helper_gvec_3 * const bgrp_fns[4] = { 6309 gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, 6310 gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, 6311 }; 6312 TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, 6313 bgrp_fns[a->esz], a, 0) 6314 6315 static gen_helper_gvec_3 * const cadd_fns[4] = { 6316 gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, 6317 gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d, 6318 }; 6319 TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz, 6320 cadd_fns[a->esz], a, 0) 6321 TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz, 6322 cadd_fns[a->esz], a, 1) 6323 6324 static gen_helper_gvec_3 * const sqcadd_fns[4] = { 6325 gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h, 6326 gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d, 6327 }; 6328 TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz, 6329 sqcadd_fns[a->esz], a, 0) 6330 TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz, 6331 sqcadd_fns[a->esz], a, 1) 6332 6333 static gen_helper_gvec_4 * const sabal_fns[4] = { 6334 NULL, gen_helper_sve2_sabal_h, 6335 gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d, 6336 }; 6337 TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0) 6338 TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1) 6339 6340 static gen_helper_gvec_4 * const uabal_fns[4] = { 6341 NULL, gen_helper_sve2_uabal_h, 6342 gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d, 6343 }; 6344 TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0) 6345 TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1) 6346 6347 static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel) 6348 { 6349 static gen_helper_gvec_4 * const fns[2] = { 6350 gen_helper_sve2_adcl_s, 6351 gen_helper_sve2_adcl_d, 6352 }; 6353 /* 6354 * Note that in this case the ESZ field encodes both size and sign. 6355 * Split out 'subtract' into bit 1 of the data field for the helper. 
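     * Concretely: fns[a->esz & 1] picks the 32-bit vs 64-bit helper,
     * (a->esz & 2) becomes the 'subtract' flag in bit 1 of data, and
     * 'sel' (bottom/top) occupies bit 0 of data.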
6356 */ 6357 return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel); 6358 } 6359 6360 TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false) 6361 TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true) 6362 6363 TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a) 6364 TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a) 6365 TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a) 6366 TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a) 6367 TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a) 6368 TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a) 6369 6370 TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a) 6371 TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a) 6372 6373 static bool do_narrow_extract(DisasContext *s, arg_rri_esz *a, 6374 const GVecGen2 ops[3]) 6375 { 6376 if (a->esz < 0 || a->esz > MO_32 || a->imm != 0) { 6377 return false; 6378 } 6379 if (sve_access_check(s)) { 6380 unsigned vsz = vec_full_reg_size(s); 6381 tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd), 6382 vec_full_reg_offset(s, a->rn), 6383 vsz, vsz, &ops[a->esz]); 6384 } 6385 return true; 6386 } 6387 6388 static const TCGOpcode sqxtn_list[] = { 6389 INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0 6390 }; 6391 6392 static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6393 { 6394 int halfbits = 4 << vece; 6395 int64_t mask = (1ull << halfbits) - 1; 6396 int64_t min = -1ull << (halfbits - 1); 6397 int64_t max = -min - 1; 6398 6399 tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, min)); 6400 tcg_gen_smin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max)); 6401 tcg_gen_and_vec(vece, d, d, tcg_constant_vec_matching(d, vece, mask)); 6402 } 6403 6404 static const GVecGen2 sqxtnb_ops[3] = { 6405 { .fniv = gen_sqxtnb_vec, 6406 .opt_opc = sqxtn_list, 6407 .fno = gen_helper_sve2_sqxtnb_h, 6408 .vece = MO_16 }, 6409 { .fniv = gen_sqxtnb_vec, 6410 .opt_opc = sqxtn_list, 6411 .fno = gen_helper_sve2_sqxtnb_s, 6412 .vece = MO_32 }, 6413 { .fniv = gen_sqxtnb_vec, 6414 .opt_opc = sqxtn_list, 6415 .fno = gen_helper_sve2_sqxtnb_d, 6416 .vece = MO_64 }, 6417 }; 6418 TRANS_FEAT(SQXTNB, aa64_sve2, do_narrow_extract, a, sqxtnb_ops) 6419 6420 static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6421 { 6422 int halfbits = 4 << vece; 6423 int64_t mask = (1ull << halfbits) - 1; 6424 int64_t min = -1ull << (halfbits - 1); 6425 int64_t max = -min - 1; 6426 6427 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); 6428 tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); 6429 tcg_gen_shli_vec(vece, n, n, halfbits); 6430 tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); 6431 } 6432 6433 static const GVecGen2 sqxtnt_ops[3] = { 6434 { .fniv = gen_sqxtnt_vec, 6435 .opt_opc = sqxtn_list, 6436 .load_dest = true, 6437 .fno = gen_helper_sve2_sqxtnt_h, 6438 .vece = MO_16 }, 6439 { .fniv = gen_sqxtnt_vec, 6440 .opt_opc = sqxtn_list, 6441 .load_dest = true, 6442 .fno = gen_helper_sve2_sqxtnt_s, 6443 .vece = MO_32 }, 6444 { .fniv = gen_sqxtnt_vec, 6445 .opt_opc = sqxtn_list, 6446 .load_dest = true, 6447 .fno = gen_helper_sve2_sqxtnt_d, 6448 .vece = MO_64 }, 6449 }; 6450 TRANS_FEAT(SQXTNT, aa64_sve2, do_narrow_extract, a, sqxtnt_ops) 6451 6452 static const TCGOpcode uqxtn_list[] = { 6453 INDEX_op_shli_vec, INDEX_op_umin_vec, 0 6454 }; 6455 6456 static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6457 { 6458 int 
halfbits = 4 << vece; 6459 int64_t max = (1ull << halfbits) - 1; 6460 6461 tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); 6462 } 6463 6464 static const GVecGen2 uqxtnb_ops[3] = { 6465 { .fniv = gen_uqxtnb_vec, 6466 .opt_opc = uqxtn_list, 6467 .fno = gen_helper_sve2_uqxtnb_h, 6468 .vece = MO_16 }, 6469 { .fniv = gen_uqxtnb_vec, 6470 .opt_opc = uqxtn_list, 6471 .fno = gen_helper_sve2_uqxtnb_s, 6472 .vece = MO_32 }, 6473 { .fniv = gen_uqxtnb_vec, 6474 .opt_opc = uqxtn_list, 6475 .fno = gen_helper_sve2_uqxtnb_d, 6476 .vece = MO_64 }, 6477 }; 6478 TRANS_FEAT(UQXTNB, aa64_sve2, do_narrow_extract, a, uqxtnb_ops) 6479 6480 static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6481 { 6482 int halfbits = 4 << vece; 6483 int64_t max = (1ull << halfbits) - 1; 6484 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6485 6486 tcg_gen_umin_vec(vece, n, n, maxv); 6487 tcg_gen_shli_vec(vece, n, n, halfbits); 6488 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6489 } 6490 6491 static const GVecGen2 uqxtnt_ops[3] = { 6492 { .fniv = gen_uqxtnt_vec, 6493 .opt_opc = uqxtn_list, 6494 .load_dest = true, 6495 .fno = gen_helper_sve2_uqxtnt_h, 6496 .vece = MO_16 }, 6497 { .fniv = gen_uqxtnt_vec, 6498 .opt_opc = uqxtn_list, 6499 .load_dest = true, 6500 .fno = gen_helper_sve2_uqxtnt_s, 6501 .vece = MO_32 }, 6502 { .fniv = gen_uqxtnt_vec, 6503 .opt_opc = uqxtn_list, 6504 .load_dest = true, 6505 .fno = gen_helper_sve2_uqxtnt_d, 6506 .vece = MO_64 }, 6507 }; 6508 TRANS_FEAT(UQXTNT, aa64_sve2, do_narrow_extract, a, uqxtnt_ops) 6509 6510 static const TCGOpcode sqxtun_list[] = { 6511 INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0 6512 }; 6513 6514 static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6515 { 6516 int halfbits = 4 << vece; 6517 int64_t max = (1ull << halfbits) - 1; 6518 6519 tcg_gen_smax_vec(vece, d, n, tcg_constant_vec_matching(d, vece, 0)); 6520 tcg_gen_umin_vec(vece, d, d, tcg_constant_vec_matching(d, vece, max)); 6521 } 6522 6523 static const GVecGen2 sqxtunb_ops[3] = { 6524 { .fniv = gen_sqxtunb_vec, 6525 .opt_opc = sqxtun_list, 6526 .fno = gen_helper_sve2_sqxtunb_h, 6527 .vece = MO_16 }, 6528 { .fniv = gen_sqxtunb_vec, 6529 .opt_opc = sqxtun_list, 6530 .fno = gen_helper_sve2_sqxtunb_s, 6531 .vece = MO_32 }, 6532 { .fniv = gen_sqxtunb_vec, 6533 .opt_opc = sqxtun_list, 6534 .fno = gen_helper_sve2_sqxtunb_d, 6535 .vece = MO_64 }, 6536 }; 6537 TRANS_FEAT(SQXTUNB, aa64_sve2, do_narrow_extract, a, sqxtunb_ops) 6538 6539 static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n) 6540 { 6541 int halfbits = 4 << vece; 6542 int64_t max = (1ull << halfbits) - 1; 6543 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6544 6545 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); 6546 tcg_gen_umin_vec(vece, n, n, maxv); 6547 tcg_gen_shli_vec(vece, n, n, halfbits); 6548 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6549 } 6550 6551 static const GVecGen2 sqxtunt_ops[3] = { 6552 { .fniv = gen_sqxtunt_vec, 6553 .opt_opc = sqxtun_list, 6554 .load_dest = true, 6555 .fno = gen_helper_sve2_sqxtunt_h, 6556 .vece = MO_16 }, 6557 { .fniv = gen_sqxtunt_vec, 6558 .opt_opc = sqxtun_list, 6559 .load_dest = true, 6560 .fno = gen_helper_sve2_sqxtunt_s, 6561 .vece = MO_32 }, 6562 { .fniv = gen_sqxtunt_vec, 6563 .opt_opc = sqxtun_list, 6564 .load_dest = true, 6565 .fno = gen_helper_sve2_sqxtunt_d, 6566 .vece = MO_64 }, 6567 }; 6568 TRANS_FEAT(SQXTUNT, aa64_sve2, do_narrow_extract, a, sqxtunt_ops) 6569 6570 static bool do_shr_narrow(DisasContext *s, 
arg_rri_esz *a, 6571 const GVecGen2i ops[3]) 6572 { 6573 if (a->esz < 0 || a->esz > MO_32) { 6574 return false; 6575 } 6576 assert(a->imm > 0 && a->imm <= (8 << a->esz)); 6577 if (sve_access_check(s)) { 6578 unsigned vsz = vec_full_reg_size(s); 6579 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd), 6580 vec_full_reg_offset(s, a->rn), 6581 vsz, vsz, a->imm, &ops[a->esz]); 6582 } 6583 return true; 6584 } 6585 6586 static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr) 6587 { 6588 int halfbits = 4 << vece; 6589 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits)); 6590 6591 tcg_gen_shri_i64(d, n, shr); 6592 tcg_gen_andi_i64(d, d, mask); 6593 } 6594 6595 static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6596 { 6597 gen_shrnb_i64(MO_16, d, n, shr); 6598 } 6599 6600 static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6601 { 6602 gen_shrnb_i64(MO_32, d, n, shr); 6603 } 6604 6605 static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6606 { 6607 gen_shrnb_i64(MO_64, d, n, shr); 6608 } 6609 6610 static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) 6611 { 6612 int halfbits = 4 << vece; 6613 uint64_t mask = MAKE_64BIT_MASK(0, halfbits); 6614 6615 tcg_gen_shri_vec(vece, n, n, shr); 6616 tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask)); 6617 } 6618 6619 static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 }; 6620 static const GVecGen2i shrnb_ops[3] = { 6621 { .fni8 = gen_shrnb16_i64, 6622 .fniv = gen_shrnb_vec, 6623 .opt_opc = shrnb_vec_list, 6624 .fno = gen_helper_sve2_shrnb_h, 6625 .vece = MO_16 }, 6626 { .fni8 = gen_shrnb32_i64, 6627 .fniv = gen_shrnb_vec, 6628 .opt_opc = shrnb_vec_list, 6629 .fno = gen_helper_sve2_shrnb_s, 6630 .vece = MO_32 }, 6631 { .fni8 = gen_shrnb64_i64, 6632 .fniv = gen_shrnb_vec, 6633 .opt_opc = shrnb_vec_list, 6634 .fno = gen_helper_sve2_shrnb_d, 6635 .vece = MO_64 }, 6636 }; 6637 TRANS_FEAT(SHRNB, aa64_sve2, do_shr_narrow, a, shrnb_ops) 6638 6639 static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr) 6640 { 6641 int halfbits = 4 << vece; 6642 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits)); 6643 6644 tcg_gen_shli_i64(n, n, halfbits - shr); 6645 tcg_gen_andi_i64(n, n, ~mask); 6646 tcg_gen_andi_i64(d, d, mask); 6647 tcg_gen_or_i64(d, d, n); 6648 } 6649 6650 static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6651 { 6652 gen_shrnt_i64(MO_16, d, n, shr); 6653 } 6654 6655 static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6656 { 6657 gen_shrnt_i64(MO_32, d, n, shr); 6658 } 6659 6660 static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr) 6661 { 6662 tcg_gen_shri_i64(n, n, shr); 6663 tcg_gen_deposit_i64(d, d, n, 32, 32); 6664 } 6665 6666 static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr) 6667 { 6668 int halfbits = 4 << vece; 6669 uint64_t mask = MAKE_64BIT_MASK(0, halfbits); 6670 6671 tcg_gen_shli_vec(vece, n, n, halfbits - shr); 6672 tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); 6673 } 6674 6675 static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 }; 6676 static const GVecGen2i shrnt_ops[3] = { 6677 { .fni8 = gen_shrnt16_i64, 6678 .fniv = gen_shrnt_vec, 6679 .opt_opc = shrnt_vec_list, 6680 .load_dest = true, 6681 .fno = gen_helper_sve2_shrnt_h, 6682 .vece = MO_16 }, 6683 { .fni8 = gen_shrnt32_i64, 6684 .fniv = gen_shrnt_vec, 6685 .opt_opc = shrnt_vec_list, 6686 .load_dest = true, 6687 .fno = gen_helper_sve2_shrnt_s, 6688 
.vece = MO_32 }, 6689 { .fni8 = gen_shrnt64_i64, 6690 .fniv = gen_shrnt_vec, 6691 .opt_opc = shrnt_vec_list, 6692 .load_dest = true, 6693 .fno = gen_helper_sve2_shrnt_d, 6694 .vece = MO_64 }, 6695 }; 6696 TRANS_FEAT(SHRNT, aa64_sve2, do_shr_narrow, a, shrnt_ops) 6697 6698 static const GVecGen2i rshrnb_ops[3] = { 6699 { .fno = gen_helper_sve2_rshrnb_h }, 6700 { .fno = gen_helper_sve2_rshrnb_s }, 6701 { .fno = gen_helper_sve2_rshrnb_d }, 6702 }; 6703 TRANS_FEAT(RSHRNB, aa64_sve2, do_shr_narrow, a, rshrnb_ops) 6704 6705 static const GVecGen2i rshrnt_ops[3] = { 6706 { .fno = gen_helper_sve2_rshrnt_h }, 6707 { .fno = gen_helper_sve2_rshrnt_s }, 6708 { .fno = gen_helper_sve2_rshrnt_d }, 6709 }; 6710 TRANS_FEAT(RSHRNT, aa64_sve2, do_shr_narrow, a, rshrnt_ops) 6711 6712 static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d, 6713 TCGv_vec n, int64_t shr) 6714 { 6715 int halfbits = 4 << vece; 6716 uint64_t max = MAKE_64BIT_MASK(0, halfbits); 6717 6718 tcg_gen_sari_vec(vece, n, n, shr); 6719 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); 6720 tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); 6721 } 6722 6723 static const TCGOpcode sqshrunb_vec_list[] = { 6724 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0 6725 }; 6726 static const GVecGen2i sqshrunb_ops[3] = { 6727 { .fniv = gen_sqshrunb_vec, 6728 .opt_opc = sqshrunb_vec_list, 6729 .fno = gen_helper_sve2_sqshrunb_h, 6730 .vece = MO_16 }, 6731 { .fniv = gen_sqshrunb_vec, 6732 .opt_opc = sqshrunb_vec_list, 6733 .fno = gen_helper_sve2_sqshrunb_s, 6734 .vece = MO_32 }, 6735 { .fniv = gen_sqshrunb_vec, 6736 .opt_opc = sqshrunb_vec_list, 6737 .fno = gen_helper_sve2_sqshrunb_d, 6738 .vece = MO_64 }, 6739 }; 6740 TRANS_FEAT(SQSHRUNB, aa64_sve2, do_shr_narrow, a, sqshrunb_ops) 6741 6742 static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d, 6743 TCGv_vec n, int64_t shr) 6744 { 6745 int halfbits = 4 << vece; 6746 uint64_t max = MAKE_64BIT_MASK(0, halfbits); 6747 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6748 6749 tcg_gen_sari_vec(vece, n, n, shr); 6750 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, 0)); 6751 tcg_gen_umin_vec(vece, n, n, maxv); 6752 tcg_gen_shli_vec(vece, n, n, halfbits); 6753 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6754 } 6755 6756 static const TCGOpcode sqshrunt_vec_list[] = { 6757 INDEX_op_shli_vec, INDEX_op_sari_vec, 6758 INDEX_op_smax_vec, INDEX_op_umin_vec, 0 6759 }; 6760 static const GVecGen2i sqshrunt_ops[3] = { 6761 { .fniv = gen_sqshrunt_vec, 6762 .opt_opc = sqshrunt_vec_list, 6763 .load_dest = true, 6764 .fno = gen_helper_sve2_sqshrunt_h, 6765 .vece = MO_16 }, 6766 { .fniv = gen_sqshrunt_vec, 6767 .opt_opc = sqshrunt_vec_list, 6768 .load_dest = true, 6769 .fno = gen_helper_sve2_sqshrunt_s, 6770 .vece = MO_32 }, 6771 { .fniv = gen_sqshrunt_vec, 6772 .opt_opc = sqshrunt_vec_list, 6773 .load_dest = true, 6774 .fno = gen_helper_sve2_sqshrunt_d, 6775 .vece = MO_64 }, 6776 }; 6777 TRANS_FEAT(SQSHRUNT, aa64_sve2, do_shr_narrow, a, sqshrunt_ops) 6778 6779 static const GVecGen2i sqrshrunb_ops[3] = { 6780 { .fno = gen_helper_sve2_sqrshrunb_h }, 6781 { .fno = gen_helper_sve2_sqrshrunb_s }, 6782 { .fno = gen_helper_sve2_sqrshrunb_d }, 6783 }; 6784 TRANS_FEAT(SQRSHRUNB, aa64_sve2, do_shr_narrow, a, sqrshrunb_ops) 6785 6786 static const GVecGen2i sqrshrunt_ops[3] = { 6787 { .fno = gen_helper_sve2_sqrshrunt_h }, 6788 { .fno = gen_helper_sve2_sqrshrunt_s }, 6789 { .fno = gen_helper_sve2_sqrshrunt_d }, 6790 }; 6791 TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, 
a, sqrshrunt_ops) 6792 6793 static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d, 6794 TCGv_vec n, int64_t shr) 6795 { 6796 int halfbits = 4 << vece; 6797 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); 6798 int64_t min = -max - 1; 6799 int64_t mask = MAKE_64BIT_MASK(0, halfbits); 6800 6801 tcg_gen_sari_vec(vece, n, n, shr); 6802 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); 6803 tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); 6804 tcg_gen_and_vec(vece, d, n, tcg_constant_vec_matching(d, vece, mask)); 6805 } 6806 6807 static const TCGOpcode sqshrnb_vec_list[] = { 6808 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0 6809 }; 6810 static const GVecGen2i sqshrnb_ops[3] = { 6811 { .fniv = gen_sqshrnb_vec, 6812 .opt_opc = sqshrnb_vec_list, 6813 .fno = gen_helper_sve2_sqshrnb_h, 6814 .vece = MO_16 }, 6815 { .fniv = gen_sqshrnb_vec, 6816 .opt_opc = sqshrnb_vec_list, 6817 .fno = gen_helper_sve2_sqshrnb_s, 6818 .vece = MO_32 }, 6819 { .fniv = gen_sqshrnb_vec, 6820 .opt_opc = sqshrnb_vec_list, 6821 .fno = gen_helper_sve2_sqshrnb_d, 6822 .vece = MO_64 }, 6823 }; 6824 TRANS_FEAT(SQSHRNB, aa64_sve2, do_shr_narrow, a, sqshrnb_ops) 6825 6826 static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d, 6827 TCGv_vec n, int64_t shr) 6828 { 6829 int halfbits = 4 << vece; 6830 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1); 6831 int64_t min = -max - 1; 6832 int64_t mask = MAKE_64BIT_MASK(0, halfbits); 6833 6834 tcg_gen_sari_vec(vece, n, n, shr); 6835 tcg_gen_smax_vec(vece, n, n, tcg_constant_vec_matching(d, vece, min)); 6836 tcg_gen_smin_vec(vece, n, n, tcg_constant_vec_matching(d, vece, max)); 6837 tcg_gen_shli_vec(vece, n, n, halfbits); 6838 tcg_gen_bitsel_vec(vece, d, tcg_constant_vec_matching(d, vece, mask), d, n); 6839 } 6840 6841 static const TCGOpcode sqshrnt_vec_list[] = { 6842 INDEX_op_shli_vec, INDEX_op_sari_vec, 6843 INDEX_op_smax_vec, INDEX_op_smin_vec, 0 6844 }; 6845 static const GVecGen2i sqshrnt_ops[3] = { 6846 { .fniv = gen_sqshrnt_vec, 6847 .opt_opc = sqshrnt_vec_list, 6848 .load_dest = true, 6849 .fno = gen_helper_sve2_sqshrnt_h, 6850 .vece = MO_16 }, 6851 { .fniv = gen_sqshrnt_vec, 6852 .opt_opc = sqshrnt_vec_list, 6853 .load_dest = true, 6854 .fno = gen_helper_sve2_sqshrnt_s, 6855 .vece = MO_32 }, 6856 { .fniv = gen_sqshrnt_vec, 6857 .opt_opc = sqshrnt_vec_list, 6858 .load_dest = true, 6859 .fno = gen_helper_sve2_sqshrnt_d, 6860 .vece = MO_64 }, 6861 }; 6862 TRANS_FEAT(SQSHRNT, aa64_sve2, do_shr_narrow, a, sqshrnt_ops) 6863 6864 static const GVecGen2i sqrshrnb_ops[3] = { 6865 { .fno = gen_helper_sve2_sqrshrnb_h }, 6866 { .fno = gen_helper_sve2_sqrshrnb_s }, 6867 { .fno = gen_helper_sve2_sqrshrnb_d }, 6868 }; 6869 TRANS_FEAT(SQRSHRNB, aa64_sve2, do_shr_narrow, a, sqrshrnb_ops) 6870 6871 static const GVecGen2i sqrshrnt_ops[3] = { 6872 { .fno = gen_helper_sve2_sqrshrnt_h }, 6873 { .fno = gen_helper_sve2_sqrshrnt_s }, 6874 { .fno = gen_helper_sve2_sqrshrnt_d }, 6875 }; 6876 TRANS_FEAT(SQRSHRNT, aa64_sve2, do_shr_narrow, a, sqrshrnt_ops) 6877 6878 static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d, 6879 TCGv_vec n, int64_t shr) 6880 { 6881 int halfbits = 4 << vece; 6882 int64_t max = MAKE_64BIT_MASK(0, halfbits); 6883 6884 tcg_gen_shri_vec(vece, n, n, shr); 6885 tcg_gen_umin_vec(vece, d, n, tcg_constant_vec_matching(d, vece, max)); 6886 } 6887 6888 static const TCGOpcode uqshrnb_vec_list[] = { 6889 INDEX_op_shri_vec, INDEX_op_umin_vec, 0 6890 }; 6891 static const GVecGen2i uqshrnb_ops[3] = { 6892 { .fniv = gen_uqshrnb_vec, 6893 .opt_opc = 
uqshrnb_vec_list, 6894 .fno = gen_helper_sve2_uqshrnb_h, 6895 .vece = MO_16 }, 6896 { .fniv = gen_uqshrnb_vec, 6897 .opt_opc = uqshrnb_vec_list, 6898 .fno = gen_helper_sve2_uqshrnb_s, 6899 .vece = MO_32 }, 6900 { .fniv = gen_uqshrnb_vec, 6901 .opt_opc = uqshrnb_vec_list, 6902 .fno = gen_helper_sve2_uqshrnb_d, 6903 .vece = MO_64 }, 6904 }; 6905 TRANS_FEAT(UQSHRNB, aa64_sve2, do_shr_narrow, a, uqshrnb_ops) 6906 6907 static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d, 6908 TCGv_vec n, int64_t shr) 6909 { 6910 int halfbits = 4 << vece; 6911 int64_t max = MAKE_64BIT_MASK(0, halfbits); 6912 TCGv_vec maxv = tcg_constant_vec_matching(d, vece, max); 6913 6914 tcg_gen_shri_vec(vece, n, n, shr); 6915 tcg_gen_umin_vec(vece, n, n, maxv); 6916 tcg_gen_shli_vec(vece, n, n, halfbits); 6917 tcg_gen_bitsel_vec(vece, d, maxv, d, n); 6918 } 6919 6920 static const TCGOpcode uqshrnt_vec_list[] = { 6921 INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0 6922 }; 6923 static const GVecGen2i uqshrnt_ops[3] = { 6924 { .fniv = gen_uqshrnt_vec, 6925 .opt_opc = uqshrnt_vec_list, 6926 .load_dest = true, 6927 .fno = gen_helper_sve2_uqshrnt_h, 6928 .vece = MO_16 }, 6929 { .fniv = gen_uqshrnt_vec, 6930 .opt_opc = uqshrnt_vec_list, 6931 .load_dest = true, 6932 .fno = gen_helper_sve2_uqshrnt_s, 6933 .vece = MO_32 }, 6934 { .fniv = gen_uqshrnt_vec, 6935 .opt_opc = uqshrnt_vec_list, 6936 .load_dest = true, 6937 .fno = gen_helper_sve2_uqshrnt_d, 6938 .vece = MO_64 }, 6939 }; 6940 TRANS_FEAT(UQSHRNT, aa64_sve2, do_shr_narrow, a, uqshrnt_ops) 6941 6942 static const GVecGen2i uqrshrnb_ops[3] = { 6943 { .fno = gen_helper_sve2_uqrshrnb_h }, 6944 { .fno = gen_helper_sve2_uqrshrnb_s }, 6945 { .fno = gen_helper_sve2_uqrshrnb_d }, 6946 }; 6947 TRANS_FEAT(UQRSHRNB, aa64_sve2, do_shr_narrow, a, uqrshrnb_ops) 6948 6949 static const GVecGen2i uqrshrnt_ops[3] = { 6950 { .fno = gen_helper_sve2_uqrshrnt_h }, 6951 { .fno = gen_helper_sve2_uqrshrnt_s }, 6952 { .fno = gen_helper_sve2_uqrshrnt_d }, 6953 }; 6954 TRANS_FEAT(UQRSHRNT, aa64_sve2, do_shr_narrow, a, uqrshrnt_ops) 6955 6956 #define DO_SVE2_ZZZ_NARROW(NAME, name) \ 6957 static gen_helper_gvec_3 * const name##_fns[4] = { \ 6958 NULL, gen_helper_sve2_##name##_h, \ 6959 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \ 6960 }; \ 6961 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz, \ 6962 name##_fns[a->esz], a, 0) 6963 6964 DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb) 6965 DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt) 6966 DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb) 6967 DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt) 6968 6969 DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb) 6970 DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt) 6971 DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb) 6972 DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt) 6973 6974 static gen_helper_gvec_flags_4 * const match_fns[4] = { 6975 gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL 6976 }; 6977 TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) 6978 6979 static gen_helper_gvec_flags_4 * const nmatch_fns[4] = { 6980 gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL 6981 }; 6982 TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) 6983 6984 static gen_helper_gvec_4 * const histcnt_fns[4] = { 6985 NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d 6986 }; 6987 TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, 6988 histcnt_fns[a->esz], a, 0) 6989 6990 TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, 6991 a->esz == 0 ? 
gen_helper_sve2_histseg : NULL, a, 0) 6992 6993 DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz) 6994 DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz) 6995 DO_ZPZZ_FP(FMINNMP, aa64_sve2, sve2_fminnmp_zpzz) 6996 DO_ZPZZ_FP(FMAXP, aa64_sve2, sve2_fmaxp_zpzz) 6997 DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz) 6998 6999 /* 7000 * SVE Integer Multiply-Add (unpredicated) 7001 */ 7002 7003 TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, 7004 gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra, 7005 0, FPST_A64) 7006 TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, 7007 gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra, 7008 0, FPST_A64) 7009 7010 static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = { 7011 NULL, gen_helper_sve2_sqdmlal_zzzw_h, 7012 gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d, 7013 }; 7014 TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7015 sqdmlal_zzzw_fns[a->esz], a, 0) 7016 TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7017 sqdmlal_zzzw_fns[a->esz], a, 3) 7018 TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz, 7019 sqdmlal_zzzw_fns[a->esz], a, 2) 7020 7021 static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = { 7022 NULL, gen_helper_sve2_sqdmlsl_zzzw_h, 7023 gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d, 7024 }; 7025 TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7026 sqdmlsl_zzzw_fns[a->esz], a, 0) 7027 TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7028 sqdmlsl_zzzw_fns[a->esz], a, 3) 7029 TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz, 7030 sqdmlsl_zzzw_fns[a->esz], a, 2) 7031 7032 static gen_helper_gvec_4 * const sqrdmlah_fns[] = { 7033 gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h, 7034 gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d, 7035 }; 7036 TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz, 7037 sqrdmlah_fns[a->esz], a, 0) 7038 7039 static gen_helper_gvec_4 * const sqrdmlsh_fns[] = { 7040 gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h, 7041 gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d, 7042 }; 7043 TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz, 7044 sqrdmlsh_fns[a->esz], a, 0) 7045 7046 static gen_helper_gvec_4 * const smlal_zzzw_fns[] = { 7047 NULL, gen_helper_sve2_smlal_zzzw_h, 7048 gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d, 7049 }; 7050 TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7051 smlal_zzzw_fns[a->esz], a, 0) 7052 TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7053 smlal_zzzw_fns[a->esz], a, 1) 7054 7055 static gen_helper_gvec_4 * const umlal_zzzw_fns[] = { 7056 NULL, gen_helper_sve2_umlal_zzzw_h, 7057 gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d, 7058 }; 7059 TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7060 umlal_zzzw_fns[a->esz], a, 0) 7061 TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7062 umlal_zzzw_fns[a->esz], a, 1) 7063 7064 static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = { 7065 NULL, gen_helper_sve2_smlsl_zzzw_h, 7066 gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d, 7067 }; 7068 TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7069 smlsl_zzzw_fns[a->esz], a, 0) 7070 TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7071 smlsl_zzzw_fns[a->esz], a, 1) 7072 7073 static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = { 7074 NULL, gen_helper_sve2_umlsl_zzzw_h, 7075 gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d, 7076 }; 7077 TRANS_FEAT(UMLSLB_zzzw, 
aa64_sve2, gen_gvec_ool_arg_zzzz, 7078 umlsl_zzzw_fns[a->esz], a, 0) 7079 TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz, 7080 umlsl_zzzw_fns[a->esz], a, 1) 7081 7082 static gen_helper_gvec_4 * const cmla_fns[] = { 7083 gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h, 7084 gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d, 7085 }; 7086 TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz, 7087 cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) 7088 7089 static gen_helper_gvec_4 * const cdot_fns[] = { 7090 NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d 7091 }; 7092 TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz, 7093 cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) 7094 7095 static gen_helper_gvec_4 * const sqrdcmlah_fns[] = { 7096 gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h, 7097 gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d, 7098 }; 7099 TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz, 7100 sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot) 7101 7102 TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7103 a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0) 7104 7105 TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, 7106 gen_helper_crypto_aesmc, a->rd, a->rd, 0) 7107 TRANS_FEAT_NONSTREAMING(AESIMC, aa64_sve2_aes, gen_gvec_ool_zz, 7108 gen_helper_crypto_aesimc, a->rd, a->rd, 0) 7109 7110 TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, 7111 gen_helper_crypto_aese, a, 0) 7112 TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, 7113 gen_helper_crypto_aesd, a, 0) 7114 7115 TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, 7116 gen_helper_crypto_sm4e, a, 0) 7117 TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, 7118 gen_helper_crypto_sm4ekey, a, 0) 7119 7120 TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, 7121 gen_gvec_rax1, a) 7122 7123 TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz, 7124 gen_helper_sve2_fcvtnt_sh, a, 0, FPST_A64) 7125 TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz, 7126 gen_helper_sve2_fcvtnt_ds, a, 0, FPST_A64) 7127 7128 TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz, 7129 gen_helper_sve_bfcvtnt, a, 0, 7130 s->fpcr_ah ? FPST_AH : FPST_A64) 7131 7132 TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz, 7133 gen_helper_sve2_fcvtlt_hs, a, 0, FPST_A64) 7134 TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz, 7135 gen_helper_sve2_fcvtlt_sd, a, 0, FPST_A64) 7136 7137 TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a, 7138 FPROUNDING_ODD, gen_helper_sve_fcvt_ds) 7139 TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a, 7140 FPROUNDING_ODD, gen_helper_sve2_fcvtnt_ds) 7141 7142 static gen_helper_gvec_3_ptr * const flogb_fns[] = { 7143 NULL, gen_helper_flogb_h, 7144 gen_helper_flogb_s, gen_helper_flogb_d 7145 }; 7146 TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz], 7147 a, 0, a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64) 7148 7149 static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel) 7150 { 7151 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s, 7152 a->rd, a->rn, a->rm, a->ra, 7153 (sel << 1) | sub, tcg_env); 7154 } 7155 7156 TRANS_FEAT(FMLALB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, false) 7157 TRANS_FEAT(FMLALT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, true) 7158 TRANS_FEAT(FMLSLB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, false) 7159 TRANS_FEAT(FMLSLT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, true) 7160 7161 static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel) 7162 { 7163 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s, 7164 a->rd, a->rn, a->rm, a->ra, 7165 (a->index << 2) | (sel << 1) | sub, tcg_env); 7166 } 7167 7168 TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false) 7169 TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true) 7170 TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false) 7171 TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true) 7172 7173 TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7174 gen_helper_gvec_smmla_b, a, 0) 7175 TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7176 gen_helper_gvec_usmmla_b, a, 0) 7177 TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, 7178 gen_helper_gvec_ummla_b, a, 0) 7179 7180 TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_env_arg_zzzz, 7181 gen_helper_gvec_bfdot, a, 0) 7182 TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_env_arg_zzxz, 7183 gen_helper_gvec_bfdot_idx, a) 7184 7185 TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_env_arg_zzzz, 7186 gen_helper_gvec_bfmmla, a, 0) 7187 7188 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) 7189 { 7190 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal, 7191 a->rd, a->rn, a->rm, a->ra, sel, 7192 s->fpcr_ah ? FPST_AH : FPST_A64); 7193 } 7194 7195 TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false) 7196 TRANS_FEAT(BFMLALT_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, true) 7197 7198 static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) 7199 { 7200 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx, 7201 a->rd, a->rn, a->rm, a->ra, 7202 (a->index << 1) | sel, 7203 s->fpcr_ah ? FPST_AH : FPST_A64); 7204 } 7205 7206 TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) 7207 TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true) 7208 7209 static bool trans_PSEL(DisasContext *s, arg_psel *a) 7210 { 7211 int vl = vec_full_reg_size(s); 7212 int pl = pred_gvec_reg_size(s); 7213 int elements = vl >> a->esz; 7214 TCGv_i64 tmp, didx, dbit; 7215 TCGv_ptr ptr; 7216 7217 if (!dc_isar_feature(aa64_sme, s)) { 7218 return false; 7219 } 7220 if (!sve_access_check(s)) { 7221 return true; 7222 } 7223 7224 tmp = tcg_temp_new_i64(); 7225 dbit = tcg_temp_new_i64(); 7226 didx = tcg_temp_new_i64(); 7227 ptr = tcg_temp_new_ptr(); 7228 7229 /* Compute the predicate element. */ 7230 tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm); 7231 if (is_power_of_2(elements)) { 7232 tcg_gen_andi_i64(tmp, tmp, elements - 1); 7233 } else { 7234 tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements)); 7235 } 7236 7237 /* Extract the predicate byte and bit indices. 
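     * Predicates have one bit per byte of the vector, so the element
     * index is converted to a bit number by shifting left by esz: e.g.
     * with esz == MO_32 and element 5, tmp becomes 5 << 2 = 20, giving
     * dbit = 20 & 7 = 4 and didx = 20 >> 3 = 2 (bit 4 of byte 2).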
    /* Extract the predicate byte and bit indices. */
    tcg_gen_shli_i64(tmp, tmp, a->esz);
    tcg_gen_andi_i64(dbit, tmp, 7);
    tcg_gen_shri_i64(didx, tmp, 3);
    if (HOST_BIG_ENDIAN) {
        tcg_gen_xori_i64(didx, didx, 7);
    }

    /* Load the predicate word. */
    tcg_gen_trunc_i64_ptr(ptr, didx);
    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));

    /* Extract the predicate bit and replicate to MO_64. */
    tcg_gen_shr_i64(tmp, tmp, dbit);
    tcg_gen_andi_i64(tmp, tmp, 1);
    tcg_gen_neg_i64(tmp, tmp);

    /* Apply to either copy the source, or write zeros. */
    tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
                      pred_full_reg_offset(s, a->pn), tmp, pl, pl);
    return true;
}

static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
{
    tcg_gen_smax_i32(d, a, n);
    tcg_gen_smin_i32(d, d, m);
}

static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
{
    tcg_gen_smax_i64(d, a, n);
    tcg_gen_smin_i64(d, d, m);
}

static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                           TCGv_vec m, TCGv_vec a)
{
    tcg_gen_smax_vec(vece, d, a, n);
    tcg_gen_smin_vec(vece, d, d, m);
}

static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop[] = {
        INDEX_op_smin_vec, INDEX_op_smax_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_sclamp_i32,
          .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_sclamp_i64,
          .fniv = gen_sclamp_vec,
          .fno = gen_helper_gvec_sclamp_d,
          .opt_opc = vecop,
          .vece = MO_64,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
}

TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
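
/*
 * UCLAMP mirrors SCLAMP with unsigned min/max: d = umin(umax(a, n), m).
 * As above, the GVecGen4 table prefers the inline vector expansion
 * (.fniv) when the host supports the umin/umax vector ops, falling back
 * to the integer (.fni4/.fni8) or out-of-line (.fno) expansions otherwise.
 */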
static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
{
    tcg_gen_umax_i32(d, a, n);
    tcg_gen_umin_i32(d, d, m);
}

static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
{
    tcg_gen_umax_i64(d, a, n);
    tcg_gen_umin_i64(d, d, m);
}

static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                           TCGv_vec m, TCGv_vec a)
{
    tcg_gen_umax_vec(vece, d, a, n);
    tcg_gen_umin_vec(vece, d, d, m);
}

static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                       uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop[] = {
        INDEX_op_umin_vec, INDEX_op_umax_vec, 0
    };
    static const GVecGen4 ops[4] = {
        { .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_uclamp_i32,
          .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_uclamp_i64,
          .fniv = gen_uclamp_vec,
          .fno = gen_helper_gvec_uclamp_d,
          .opt_opc = vecop,
          .vece = MO_64,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
}

TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)