/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"

static uint16_t mve_eci_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector correspond
     * to beats being executed. The mask has 1 bits for executed lanes
     * and 0 bits where ECI says this beat was already executed.
     */
    int eci;

    if ((env->condexec_bits & 0xf) != 0) {
        return 0xffff;
    }

    eci = env->condexec_bits >> 4;
    switch (eci) {
    case ECI_NONE:
        return 0xffff;
    case ECI_A0:
        return 0xfff0;
    case ECI_A0A1:
        return 0xff00;
    case ECI_A0A1A2:
    case ECI_A0A1A2B0:
        return 0xf000;
    default:
        g_assert_not_reached();
    }
}

static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     * (1) by default, we update every lane in the vector
     * (2) VPT predication stores its state in the VPR register
     * (3) low-overhead-branch tail predication will mask out part
     *     of the vector on the final iteration of the loop
     * (4) if EPSR.ECI is set then we must execute only some beats
     *     of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        uint16_t ltpmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0;
        mask &= ltpmask;
    }

    /*
     * ECI bits indicate which beats are already executed;
     * we handle this by effectively predicating them out.
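     * For example, if EPSR.ECI is ECI_A0A1 then beats 0 and 1 have
     * already been executed, mve_eci_mask() returns 0xff00, and only
     * the upper eight byte lanes remain candidates for updating.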
     */
    mask &= mve_eci_mask(env);
    return mask;
}

static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;
    uint16_t inv_mask;
    uint16_t eci_mask = mve_eci_mask(env);

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    /* Invert P0 bits if needed, but only for beats we actually executed */
    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    /* Start by assuming we invert all bits corresponding to executed beats */
    inv_mask = eci_mask;
    if (mask01 <= 8) {
        /* MASK01 says don't invert low half of P0 */
        inv_mask &= ~0xff;
    }
    if (mask23 <= 8) {
        /* MASK23 says don't invert high half of P0 */
        inv_mask &= ~0xff00;
    }
    vpr ^= inv_mask;
    /* Only update MASK01 if beat 1 executed */
    if (eci_mask & 0xf0) {
        vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    }
    /* Beat 3 always executes, so update MASK23 */
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}

/* For loads, predicated lanes are zeroed instead of keeping their old values */
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        uint16_t eci_mask = mve_eci_mask(env);                          \
        unsigned b, e;                                                  \
        /*                                                              \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned  \
         * beats so we don't care if we update part of the dest and    \
         * then take an exception.                                     \
         */                                                             \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (eci_mask & (1 << b)) {                                  \
                d[H##ESIZE(e)] = (mask & (1 << b)) ?                    \
                    cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0;     \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
    {                                                                   \
        TYPE *d = vd;                                                   \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned b, e;                                                  \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
            if (mask & (1 << b)) {                                      \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            }                                                           \
            addr += MSIZE;                                              \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR

/*
 * Gather loads/scatter stores. Here each element of Qm specifies
 * an offset to use from the base register Rm. In the _os_ versions
 * that offset is scaled by the element size.
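 * (For instance, the vldrw_sg_os_uw helper below uses ADDR_ADD_OSW,
 * i.e. each 32-bit gather address is computed as base + (offset << 2).)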
214 * For loads, predicated lanes are zeroed instead of retaining 215 * their previous values. 216 */ 217 #define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \ 218 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 219 uint32_t base) \ 220 { \ 221 TYPE *d = vd; \ 222 OFFTYPE *m = vm; \ 223 uint16_t mask = mve_element_mask(env); \ 224 uint16_t eci_mask = mve_eci_mask(env); \ 225 unsigned e; \ 226 uint32_t addr; \ 227 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ 228 if (!(eci_mask & 1)) { \ 229 continue; \ 230 } \ 231 addr = ADDRFN(base, m[H##ESIZE(e)]); \ 232 d[H##ESIZE(e)] = (mask & 1) ? \ 233 cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \ 234 if (WB) { \ 235 m[H##ESIZE(e)] = addr; \ 236 } \ 237 } \ 238 mve_advance_vpt(env); \ 239 } 240 241 /* We know here TYPE is unsigned so always the same as the offset type */ 242 #define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \ 243 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 244 uint32_t base) \ 245 { \ 246 TYPE *d = vd; \ 247 TYPE *m = vm; \ 248 uint16_t mask = mve_element_mask(env); \ 249 uint16_t eci_mask = mve_eci_mask(env); \ 250 unsigned e; \ 251 uint32_t addr; \ 252 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \ 253 if (!(eci_mask & 1)) { \ 254 continue; \ 255 } \ 256 addr = ADDRFN(base, m[H##ESIZE(e)]); \ 257 if (mask & 1) { \ 258 cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \ 259 } \ 260 if (WB) { \ 261 m[H##ESIZE(e)] = addr; \ 262 } \ 263 } \ 264 mve_advance_vpt(env); \ 265 } 266 267 /* 268 * 64-bit accesses are slightly different: they are done as two 32-bit 269 * accesses, controlled by the predicate mask for the relevant beat, 270 * and with a single 32-bit offset in the first of the two Qm elements. 271 * Note that for QEMU our IMPDEF AIRCR.ENDIANNESS is always 0 (little). 272 * Address writeback happens on the odd beats and updates the address 273 * stored in the even-beat element. 274 */ 275 #define DO_VLDR64_SG(OP, ADDRFN, WB) \ 276 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 277 uint32_t base) \ 278 { \ 279 uint32_t *d = vd; \ 280 uint32_t *m = vm; \ 281 uint16_t mask = mve_element_mask(env); \ 282 uint16_t eci_mask = mve_eci_mask(env); \ 283 unsigned e; \ 284 uint32_t addr; \ 285 for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ 286 if (!(eci_mask & 1)) { \ 287 continue; \ 288 } \ 289 addr = ADDRFN(base, m[H4(e & ~1)]); \ 290 addr += 4 * (e & 1); \ 291 d[H4(e)] = (mask & 1) ? 
cpu_ldl_data_ra(env, addr, GETPC()) : 0; \ 292 if (WB && (e & 1)) { \ 293 m[H4(e & ~1)] = addr - 4; \ 294 } \ 295 } \ 296 mve_advance_vpt(env); \ 297 } 298 299 #define DO_VSTR64_SG(OP, ADDRFN, WB) \ 300 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \ 301 uint32_t base) \ 302 { \ 303 uint32_t *d = vd; \ 304 uint32_t *m = vm; \ 305 uint16_t mask = mve_element_mask(env); \ 306 uint16_t eci_mask = mve_eci_mask(env); \ 307 unsigned e; \ 308 uint32_t addr; \ 309 for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \ 310 if (!(eci_mask & 1)) { \ 311 continue; \ 312 } \ 313 addr = ADDRFN(base, m[H4(e & ~1)]); \ 314 addr += 4 * (e & 1); \ 315 if (mask & 1) { \ 316 cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \ 317 } \ 318 if (WB && (e & 1)) { \ 319 m[H4(e & ~1)] = addr - 4; \ 320 } \ 321 } \ 322 mve_advance_vpt(env); \ 323 } 324 325 #define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET)) 326 #define ADDR_ADD_OSH(BASE, OFFSET) ((BASE) + ((OFFSET) << 1)) 327 #define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2)) 328 #define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3)) 329 330 DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false) 331 DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false) 332 DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false) 333 334 DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false) 335 DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false) 336 DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false) 337 DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false) 338 DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false) 339 DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false) 340 DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false) 341 342 DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false) 343 DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false) 344 DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false) 345 DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false) 346 DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false) 347 348 DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false) 349 DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false) 350 DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false) 351 DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false) 352 DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false) 353 DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false) 354 DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false) 355 356 DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false) 357 DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false) 358 DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false) 359 DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false) 360 361 DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true) 362 DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true) 363 DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true) 364 DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true) 365 366 /* 367 * Deinterleaving loads/interleaving stores. 368 * 369 * For these helpers we are passed the index of the first Qreg 370 * (VLD2/VST2 will also access Qn+1, VLD4/VST4 access Qn .. Qn+3) 371 * and the value of the base address register Rn. 372 * The helpers are specialized for pattern and element size, so 373 * for instance vld42h is VLD4 with pattern 2, element size MO_16. 
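 * The four pattern values are intended to be executed as a group:
 * between them they de-interleave a full 64 bytes of memory into
 * the four registers Qn..Qn+3.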
374 * 375 * These insns are beatwise but not predicated, so we must honour ECI, 376 * but need not look at mve_element_mask(). 377 * 378 * The pseudocode implements these insns with multiple memory accesses 379 * of the element size, but rules R_VVVG and R_FXDM permit us to make 380 * one 32-bit memory access per beat. 381 */ 382 #define DO_VLD4B(OP, O1, O2, O3, O4) \ 383 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 384 uint32_t base) \ 385 { \ 386 int beat, e; \ 387 uint16_t mask = mve_eci_mask(env); \ 388 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 389 uint32_t addr, data; \ 390 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 391 if ((mask & 1) == 0) { \ 392 /* ECI says skip this beat */ \ 393 continue; \ 394 } \ 395 addr = base + off[beat] * 4; \ 396 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 397 for (e = 0; e < 4; e++, data >>= 8) { \ 398 uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ 399 qd[H1(off[beat])] = data; \ 400 } \ 401 } \ 402 } 403 404 #define DO_VLD4H(OP, O1, O2) \ 405 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 406 uint32_t base) \ 407 { \ 408 int beat; \ 409 uint16_t mask = mve_eci_mask(env); \ 410 static const uint8_t off[4] = { O1, O1, O2, O2 }; \ 411 uint32_t addr, data; \ 412 int y; /* y counts 0 2 0 2 */ \ 413 uint16_t *qd; \ 414 for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ 415 if ((mask & 1) == 0) { \ 416 /* ECI says skip this beat */ \ 417 continue; \ 418 } \ 419 addr = base + off[beat] * 8 + (beat & 1) * 4; \ 420 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 421 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \ 422 qd[H2(off[beat])] = data; \ 423 data >>= 16; \ 424 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \ 425 qd[H2(off[beat])] = data; \ 426 } \ 427 } 428 429 #define DO_VLD4W(OP, O1, O2, O3, O4) \ 430 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 431 uint32_t base) \ 432 { \ 433 int beat; \ 434 uint16_t mask = mve_eci_mask(env); \ 435 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 436 uint32_t addr, data; \ 437 uint32_t *qd; \ 438 int y; \ 439 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 440 if ((mask & 1) == 0) { \ 441 /* ECI says skip this beat */ \ 442 continue; \ 443 } \ 444 addr = base + off[beat] * 4; \ 445 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 446 y = (beat + (O1 & 2)) & 3; \ 447 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ 448 qd[H4(off[beat] >> 2)] = data; \ 449 } \ 450 } 451 452 DO_VLD4B(vld40b, 0, 1, 10, 11) 453 DO_VLD4B(vld41b, 2, 3, 12, 13) 454 DO_VLD4B(vld42b, 4, 5, 14, 15) 455 DO_VLD4B(vld43b, 6, 7, 8, 9) 456 457 DO_VLD4H(vld40h, 0, 5) 458 DO_VLD4H(vld41h, 1, 6) 459 DO_VLD4H(vld42h, 2, 7) 460 DO_VLD4H(vld43h, 3, 4) 461 462 DO_VLD4W(vld40w, 0, 1, 10, 11) 463 DO_VLD4W(vld41w, 2, 3, 12, 13) 464 DO_VLD4W(vld42w, 4, 5, 14, 15) 465 DO_VLD4W(vld43w, 6, 7, 8, 9) 466 467 #define DO_VLD2B(OP, O1, O2, O3, O4) \ 468 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 469 uint32_t base) \ 470 { \ 471 int beat, e; \ 472 uint16_t mask = mve_eci_mask(env); \ 473 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 474 uint32_t addr, data; \ 475 uint8_t *qd; \ 476 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 477 if ((mask & 1) == 0) { \ 478 /* ECI says skip this beat */ \ 479 continue; \ 480 } \ 481 addr = base + off[beat] * 2; \ 482 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 483 for (e = 0; e < 4; e++, data >>= 8) { \ 484 qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ 485 qd[H1(off[beat] + (e >> 1))] = data; \ 486 } \ 487 
} \ 488 } 489 490 #define DO_VLD2H(OP, O1, O2, O3, O4) \ 491 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 492 uint32_t base) \ 493 { \ 494 int beat; \ 495 uint16_t mask = mve_eci_mask(env); \ 496 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 497 uint32_t addr, data; \ 498 int e; \ 499 uint16_t *qd; \ 500 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 501 if ((mask & 1) == 0) { \ 502 /* ECI says skip this beat */ \ 503 continue; \ 504 } \ 505 addr = base + off[beat] * 4; \ 506 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 507 for (e = 0; e < 2; e++, data >>= 16) { \ 508 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ 509 qd[H2(off[beat])] = data; \ 510 } \ 511 } \ 512 } 513 514 #define DO_VLD2W(OP, O1, O2, O3, O4) \ 515 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 516 uint32_t base) \ 517 { \ 518 int beat; \ 519 uint16_t mask = mve_eci_mask(env); \ 520 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 521 uint32_t addr, data; \ 522 uint32_t *qd; \ 523 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 524 if ((mask & 1) == 0) { \ 525 /* ECI says skip this beat */ \ 526 continue; \ 527 } \ 528 addr = base + off[beat]; \ 529 data = cpu_ldl_le_data_ra(env, addr, GETPC()); \ 530 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ 531 qd[H4(off[beat] >> 3)] = data; \ 532 } \ 533 } 534 535 DO_VLD2B(vld20b, 0, 2, 12, 14) 536 DO_VLD2B(vld21b, 4, 6, 8, 10) 537 538 DO_VLD2H(vld20h, 0, 1, 6, 7) 539 DO_VLD2H(vld21h, 2, 3, 4, 5) 540 541 DO_VLD2W(vld20w, 0, 4, 24, 28) 542 DO_VLD2W(vld21w, 8, 12, 16, 20) 543 544 #define DO_VST4B(OP, O1, O2, O3, O4) \ 545 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 546 uint32_t base) \ 547 { \ 548 int beat, e; \ 549 uint16_t mask = mve_eci_mask(env); \ 550 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 551 uint32_t addr, data; \ 552 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 553 if ((mask & 1) == 0) { \ 554 /* ECI says skip this beat */ \ 555 continue; \ 556 } \ 557 addr = base + off[beat] * 4; \ 558 data = 0; \ 559 for (e = 3; e >= 0; e--) { \ 560 uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \ 561 data = (data << 8) | qd[H1(off[beat])]; \ 562 } \ 563 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 564 } \ 565 } 566 567 #define DO_VST4H(OP, O1, O2) \ 568 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 569 uint32_t base) \ 570 { \ 571 int beat; \ 572 uint16_t mask = mve_eci_mask(env); \ 573 static const uint8_t off[4] = { O1, O1, O2, O2 }; \ 574 uint32_t addr, data; \ 575 int y; /* y counts 0 2 0 2 */ \ 576 uint16_t *qd; \ 577 for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \ 578 if ((mask & 1) == 0) { \ 579 /* ECI says skip this beat */ \ 580 continue; \ 581 } \ 582 addr = base + off[beat] * 8 + (beat & 1) * 4; \ 583 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \ 584 data = qd[H2(off[beat])]; \ 585 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \ 586 data |= qd[H2(off[beat])] << 16; \ 587 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 588 } \ 589 } 590 591 #define DO_VST4W(OP, O1, O2, O3, O4) \ 592 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 593 uint32_t base) \ 594 { \ 595 int beat; \ 596 uint16_t mask = mve_eci_mask(env); \ 597 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 598 uint32_t addr, data; \ 599 uint32_t *qd; \ 600 int y; \ 601 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 602 if ((mask & 1) == 0) { \ 603 /* ECI says skip this beat */ \ 604 continue; \ 605 } \ 606 addr = base + off[beat] * 4; \ 607 y = (beat + (O1 & 2)) & 3; \ 
608 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \ 609 data = qd[H4(off[beat] >> 2)]; \ 610 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 611 } \ 612 } 613 614 DO_VST4B(vst40b, 0, 1, 10, 11) 615 DO_VST4B(vst41b, 2, 3, 12, 13) 616 DO_VST4B(vst42b, 4, 5, 14, 15) 617 DO_VST4B(vst43b, 6, 7, 8, 9) 618 619 DO_VST4H(vst40h, 0, 5) 620 DO_VST4H(vst41h, 1, 6) 621 DO_VST4H(vst42h, 2, 7) 622 DO_VST4H(vst43h, 3, 4) 623 624 DO_VST4W(vst40w, 0, 1, 10, 11) 625 DO_VST4W(vst41w, 2, 3, 12, 13) 626 DO_VST4W(vst42w, 4, 5, 14, 15) 627 DO_VST4W(vst43w, 6, 7, 8, 9) 628 629 #define DO_VST2B(OP, O1, O2, O3, O4) \ 630 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 631 uint32_t base) \ 632 { \ 633 int beat, e; \ 634 uint16_t mask = mve_eci_mask(env); \ 635 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 636 uint32_t addr, data; \ 637 uint8_t *qd; \ 638 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 639 if ((mask & 1) == 0) { \ 640 /* ECI says skip this beat */ \ 641 continue; \ 642 } \ 643 addr = base + off[beat] * 2; \ 644 data = 0; \ 645 for (e = 3; e >= 0; e--) { \ 646 qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \ 647 data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \ 648 } \ 649 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 650 } \ 651 } 652 653 #define DO_VST2H(OP, O1, O2, O3, O4) \ 654 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 655 uint32_t base) \ 656 { \ 657 int beat; \ 658 uint16_t mask = mve_eci_mask(env); \ 659 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 660 uint32_t addr, data; \ 661 int e; \ 662 uint16_t *qd; \ 663 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 664 if ((mask & 1) == 0) { \ 665 /* ECI says skip this beat */ \ 666 continue; \ 667 } \ 668 addr = base + off[beat] * 4; \ 669 data = 0; \ 670 for (e = 1; e >= 0; e--) { \ 671 qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \ 672 data = (data << 16) | qd[H2(off[beat])]; \ 673 } \ 674 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 675 } \ 676 } 677 678 #define DO_VST2W(OP, O1, O2, O3, O4) \ 679 void HELPER(mve_##OP)(CPUARMState *env, uint32_t qnidx, \ 680 uint32_t base) \ 681 { \ 682 int beat; \ 683 uint16_t mask = mve_eci_mask(env); \ 684 static const uint8_t off[4] = { O1, O2, O3, O4 }; \ 685 uint32_t addr, data; \ 686 uint32_t *qd; \ 687 for (beat = 0; beat < 4; beat++, mask >>= 4) { \ 688 if ((mask & 1) == 0) { \ 689 /* ECI says skip this beat */ \ 690 continue; \ 691 } \ 692 addr = base + off[beat]; \ 693 qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \ 694 data = qd[H4(off[beat] >> 3)]; \ 695 cpu_stl_le_data_ra(env, addr, data, GETPC()); \ 696 } \ 697 } 698 699 DO_VST2B(vst20b, 0, 2, 12, 14) 700 DO_VST2B(vst21b, 4, 6, 8, 10) 701 702 DO_VST2H(vst20h, 0, 1, 6, 7) 703 DO_VST2H(vst21h, 2, 3, 4, 5) 704 705 DO_VST2W(vst20w, 0, 4, 24, 28) 706 DO_VST2W(vst21w, 8, 12, 16, 20) 707 708 /* 709 * The mergemask(D, R, M) macro performs the operation "*D = R" but 710 * storing only the bytes which correspond to 1 bits in M, 711 * leaving other bytes in *D unchanged. We use _Generic 712 * to select the correct implementation based on the type of D. 
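 * (For instance, for 32-bit lanes mergemask_uw() expands the four
 * per-byte predicate bits with expand_pred_b() and does a bitwise
 * select, so bytes whose predicate bit is 0 keep their old value.)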
713 */ 714 715 static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask) 716 { 717 if (mask & 1) { 718 *d = r; 719 } 720 } 721 722 static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask) 723 { 724 mergemask_ub((uint8_t *)d, r, mask); 725 } 726 727 static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask) 728 { 729 uint16_t bmask = expand_pred_b(mask); 730 *d = (*d & ~bmask) | (r & bmask); 731 } 732 733 static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask) 734 { 735 mergemask_uh((uint16_t *)d, r, mask); 736 } 737 738 static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask) 739 { 740 uint32_t bmask = expand_pred_b(mask); 741 *d = (*d & ~bmask) | (r & bmask); 742 } 743 744 static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask) 745 { 746 mergemask_uw((uint32_t *)d, r, mask); 747 } 748 749 static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask) 750 { 751 uint64_t bmask = expand_pred_b(mask); 752 *d = (*d & ~bmask) | (r & bmask); 753 } 754 755 static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask) 756 { 757 mergemask_uq((uint64_t *)d, r, mask); 758 } 759 760 #define mergemask(D, R, M) \ 761 _Generic(D, \ 762 uint8_t *: mergemask_ub, \ 763 int8_t *: mergemask_sb, \ 764 uint16_t *: mergemask_uh, \ 765 int16_t *: mergemask_sh, \ 766 uint32_t *: mergemask_uw, \ 767 int32_t *: mergemask_sw, \ 768 uint64_t *: mergemask_uq, \ 769 int64_t *: mergemask_sq)(D, R, M) 770 771 void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val) 772 { 773 /* 774 * The generated code already replicated an 8 or 16 bit constant 775 * into the 32-bit value, so we only need to write the 32-bit 776 * value to all elements of the Qreg, allowing for predication. 777 */ 778 uint32_t *d = vd; 779 uint16_t mask = mve_element_mask(env); 780 unsigned e; 781 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 782 mergemask(&d[H4(e)], val, mask); 783 } 784 mve_advance_vpt(env); 785 } 786 787 #define DO_1OP(OP, ESIZE, TYPE, FN) \ 788 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 789 { \ 790 TYPE *d = vd, *m = vm; \ 791 uint16_t mask = mve_element_mask(env); \ 792 unsigned e; \ 793 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 794 mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \ 795 } \ 796 mve_advance_vpt(env); \ 797 } 798 799 #define DO_CLS_B(N) (clrsb32(N) - 24) 800 #define DO_CLS_H(N) (clrsb32(N) - 16) 801 802 DO_1OP(vclsb, 1, int8_t, DO_CLS_B) 803 DO_1OP(vclsh, 2, int16_t, DO_CLS_H) 804 DO_1OP(vclsw, 4, int32_t, clrsb32) 805 806 #define DO_CLZ_B(N) (clz32(N) - 24) 807 #define DO_CLZ_H(N) (clz32(N) - 16) 808 809 DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B) 810 DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H) 811 DO_1OP(vclzw, 4, uint32_t, clz32) 812 813 DO_1OP(vrev16b, 2, uint16_t, bswap16) 814 DO_1OP(vrev32b, 4, uint32_t, bswap32) 815 DO_1OP(vrev32h, 4, uint32_t, hswap32) 816 DO_1OP(vrev64b, 8, uint64_t, bswap64) 817 DO_1OP(vrev64h, 8, uint64_t, hswap64) 818 DO_1OP(vrev64w, 8, uint64_t, wswap64) 819 820 #define DO_NOT(N) (~(N)) 821 822 DO_1OP(vmvn, 8, uint64_t, DO_NOT) 823 824 #define DO_ABS(N) ((N) < 0 ? 
-(N) : (N)) 825 #define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff)) 826 #define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff)) 827 828 DO_1OP(vabsb, 1, int8_t, DO_ABS) 829 DO_1OP(vabsh, 2, int16_t, DO_ABS) 830 DO_1OP(vabsw, 4, int32_t, DO_ABS) 831 832 /* We can do these 64 bits at a time */ 833 DO_1OP(vfabsh, 8, uint64_t, DO_FABSH) 834 DO_1OP(vfabss, 8, uint64_t, DO_FABSS) 835 836 #define DO_NEG(N) (-(N)) 837 #define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000)) 838 #define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000)) 839 840 DO_1OP(vnegb, 1, int8_t, DO_NEG) 841 DO_1OP(vnegh, 2, int16_t, DO_NEG) 842 DO_1OP(vnegw, 4, int32_t, DO_NEG) 843 844 /* We can do these 64 bits at a time */ 845 DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH) 846 DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS) 847 848 /* 849 * 1 operand immediates: Vda is destination and possibly also one source. 850 * All these insns work at 64-bit widths. 851 */ 852 #define DO_1OP_IMM(OP, FN) \ 853 void HELPER(mve_##OP)(CPUARMState *env, void *vda, uint64_t imm) \ 854 { \ 855 uint64_t *da = vda; \ 856 uint16_t mask = mve_element_mask(env); \ 857 unsigned e; \ 858 for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ 859 mergemask(&da[H8(e)], FN(da[H8(e)], imm), mask); \ 860 } \ 861 mve_advance_vpt(env); \ 862 } 863 864 #define DO_MOVI(N, I) (I) 865 #define DO_ANDI(N, I) ((N) & (I)) 866 #define DO_ORRI(N, I) ((N) | (I)) 867 868 DO_1OP_IMM(vmovi, DO_MOVI) 869 DO_1OP_IMM(vandi, DO_ANDI) 870 DO_1OP_IMM(vorri, DO_ORRI) 871 872 #define DO_2OP(OP, ESIZE, TYPE, FN) \ 873 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 874 void *vd, void *vn, void *vm) \ 875 { \ 876 TYPE *d = vd, *n = vn, *m = vm; \ 877 uint16_t mask = mve_element_mask(env); \ 878 unsigned e; \ 879 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 880 mergemask(&d[H##ESIZE(e)], \ 881 FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \ 882 } \ 883 mve_advance_vpt(env); \ 884 } 885 886 /* provide unsigned 2-op helpers for all sizes */ 887 #define DO_2OP_U(OP, FN) \ 888 DO_2OP(OP##b, 1, uint8_t, FN) \ 889 DO_2OP(OP##h, 2, uint16_t, FN) \ 890 DO_2OP(OP##w, 4, uint32_t, FN) 891 892 /* provide signed 2-op helpers for all sizes */ 893 #define DO_2OP_S(OP, FN) \ 894 DO_2OP(OP##b, 1, int8_t, FN) \ 895 DO_2OP(OP##h, 2, int16_t, FN) \ 896 DO_2OP(OP##w, 4, int32_t, FN) 897 898 /* 899 * "Long" operations where two half-sized inputs (taken from either the 900 * top or the bottom of the input vector) produce a double-width result. 901 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output. 
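 * For example, vmullbsh multiplies the even (bottom) 16-bit elements
 * of the two inputs to produce 32-bit results, while vmulltsh uses
 * the odd (top) elements: TOP selects which element of each pair is read.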
902 */ 903 #define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 904 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 905 { \ 906 LTYPE *d = vd; \ 907 TYPE *n = vn, *m = vm; \ 908 uint16_t mask = mve_element_mask(env); \ 909 unsigned le; \ 910 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 911 LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \ 912 m[H##ESIZE(le * 2 + TOP)]); \ 913 mergemask(&d[H##LESIZE(le)], r, mask); \ 914 } \ 915 mve_advance_vpt(env); \ 916 } 917 918 #define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \ 919 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 920 { \ 921 TYPE *d = vd, *n = vn, *m = vm; \ 922 uint16_t mask = mve_element_mask(env); \ 923 unsigned e; \ 924 bool qc = false; \ 925 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 926 bool sat = false; \ 927 TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \ 928 mergemask(&d[H##ESIZE(e)], r, mask); \ 929 qc |= sat & mask & 1; \ 930 } \ 931 if (qc) { \ 932 env->vfp.qc[0] = qc; \ 933 } \ 934 mve_advance_vpt(env); \ 935 } 936 937 /* provide unsigned 2-op helpers for all sizes */ 938 #define DO_2OP_SAT_U(OP, FN) \ 939 DO_2OP_SAT(OP##b, 1, uint8_t, FN) \ 940 DO_2OP_SAT(OP##h, 2, uint16_t, FN) \ 941 DO_2OP_SAT(OP##w, 4, uint32_t, FN) 942 943 /* provide signed 2-op helpers for all sizes */ 944 #define DO_2OP_SAT_S(OP, FN) \ 945 DO_2OP_SAT(OP##b, 1, int8_t, FN) \ 946 DO_2OP_SAT(OP##h, 2, int16_t, FN) \ 947 DO_2OP_SAT(OP##w, 4, int32_t, FN) 948 949 #define DO_AND(N, M) ((N) & (M)) 950 #define DO_BIC(N, M) ((N) & ~(M)) 951 #define DO_ORR(N, M) ((N) | (M)) 952 #define DO_ORN(N, M) ((N) | ~(M)) 953 #define DO_EOR(N, M) ((N) ^ (M)) 954 955 DO_2OP(vand, 8, uint64_t, DO_AND) 956 DO_2OP(vbic, 8, uint64_t, DO_BIC) 957 DO_2OP(vorr, 8, uint64_t, DO_ORR) 958 DO_2OP(vorn, 8, uint64_t, DO_ORN) 959 DO_2OP(veor, 8, uint64_t, DO_EOR) 960 961 #define DO_ADD(N, M) ((N) + (M)) 962 #define DO_SUB(N, M) ((N) - (M)) 963 #define DO_MUL(N, M) ((N) * (M)) 964 965 DO_2OP_U(vadd, DO_ADD) 966 DO_2OP_U(vsub, DO_SUB) 967 DO_2OP_U(vmul, DO_MUL) 968 969 DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL) 970 DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL) 971 DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL) 972 DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL) 973 DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL) 974 DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL) 975 976 DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL) 977 DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL) 978 DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL) 979 DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL) 980 DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL) 981 DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL) 982 983 /* 984 * Polynomial multiply. We can always do this generating 64 bits 985 * of the result at a time, so we don't need to use DO_2OP_L. 
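 * DO_VMULLPBH, for instance, masks each operand down to the low byte of
 * every 16-bit element and lets pmull_h() produce the four 8x8->16
 * carry-less products of a 64-bit lane in a single call.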
986 */ 987 #define VMULLPH_MASK 0x00ff00ff00ff00ffULL 988 #define VMULLPW_MASK 0x0000ffff0000ffffULL 989 #define DO_VMULLPBH(N, M) pmull_h((N) & VMULLPH_MASK, (M) & VMULLPH_MASK) 990 #define DO_VMULLPTH(N, M) DO_VMULLPBH((N) >> 8, (M) >> 8) 991 #define DO_VMULLPBW(N, M) pmull_w((N) & VMULLPW_MASK, (M) & VMULLPW_MASK) 992 #define DO_VMULLPTW(N, M) DO_VMULLPBW((N) >> 16, (M) >> 16) 993 994 DO_2OP(vmullpbh, 8, uint64_t, DO_VMULLPBH) 995 DO_2OP(vmullpth, 8, uint64_t, DO_VMULLPTH) 996 DO_2OP(vmullpbw, 8, uint64_t, DO_VMULLPBW) 997 DO_2OP(vmullptw, 8, uint64_t, DO_VMULLPTW) 998 999 /* 1000 * Because the computation type is at least twice as large as required, 1001 * these work for both signed and unsigned source types. 1002 */ 1003 static inline uint8_t do_mulh_b(int32_t n, int32_t m) 1004 { 1005 return (n * m) >> 8; 1006 } 1007 1008 static inline uint16_t do_mulh_h(int32_t n, int32_t m) 1009 { 1010 return (n * m) >> 16; 1011 } 1012 1013 static inline uint32_t do_mulh_w(int64_t n, int64_t m) 1014 { 1015 return (n * m) >> 32; 1016 } 1017 1018 static inline uint8_t do_rmulh_b(int32_t n, int32_t m) 1019 { 1020 return (n * m + (1U << 7)) >> 8; 1021 } 1022 1023 static inline uint16_t do_rmulh_h(int32_t n, int32_t m) 1024 { 1025 return (n * m + (1U << 15)) >> 16; 1026 } 1027 1028 static inline uint32_t do_rmulh_w(int64_t n, int64_t m) 1029 { 1030 return (n * m + (1U << 31)) >> 32; 1031 } 1032 1033 DO_2OP(vmulhsb, 1, int8_t, do_mulh_b) 1034 DO_2OP(vmulhsh, 2, int16_t, do_mulh_h) 1035 DO_2OP(vmulhsw, 4, int32_t, do_mulh_w) 1036 DO_2OP(vmulhub, 1, uint8_t, do_mulh_b) 1037 DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h) 1038 DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w) 1039 1040 DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b) 1041 DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h) 1042 DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w) 1043 DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b) 1044 DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h) 1045 DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w) 1046 1047 #define DO_MAX(N, M) ((N) >= (M) ? (N) : (M)) 1048 #define DO_MIN(N, M) ((N) >= (M) ? (M) : (N)) 1049 1050 DO_2OP_S(vmaxs, DO_MAX) 1051 DO_2OP_U(vmaxu, DO_MAX) 1052 DO_2OP_S(vmins, DO_MIN) 1053 DO_2OP_U(vminu, DO_MIN) 1054 1055 #define DO_ABD(N, M) ((N) >= (M) ? 
(N) - (M) : (M) - (N)) 1056 1057 DO_2OP_S(vabds, DO_ABD) 1058 DO_2OP_U(vabdu, DO_ABD) 1059 1060 static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m) 1061 { 1062 return ((uint64_t)n + m) >> 1; 1063 } 1064 1065 static inline int32_t do_vhadd_s(int32_t n, int32_t m) 1066 { 1067 return ((int64_t)n + m) >> 1; 1068 } 1069 1070 static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m) 1071 { 1072 return ((uint64_t)n - m) >> 1; 1073 } 1074 1075 static inline int32_t do_vhsub_s(int32_t n, int32_t m) 1076 { 1077 return ((int64_t)n - m) >> 1; 1078 } 1079 1080 DO_2OP_S(vhadds, do_vhadd_s) 1081 DO_2OP_U(vhaddu, do_vhadd_u) 1082 DO_2OP_S(vhsubs, do_vhsub_s) 1083 DO_2OP_U(vhsubu, do_vhsub_u) 1084 1085 #define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 1086 #define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL) 1087 #define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 1088 #define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL) 1089 1090 DO_2OP_S(vshls, DO_VSHLS) 1091 DO_2OP_U(vshlu, DO_VSHLU) 1092 DO_2OP_S(vrshls, DO_VRSHLS) 1093 DO_2OP_U(vrshlu, DO_VRSHLU) 1094 1095 #define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1) 1096 #define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1) 1097 1098 DO_2OP_S(vrhadds, DO_RHADD_S) 1099 DO_2OP_U(vrhaddu, DO_RHADD_U) 1100 1101 static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m, 1102 uint32_t inv, uint32_t carry_in, bool update_flags) 1103 { 1104 uint16_t mask = mve_element_mask(env); 1105 unsigned e; 1106 1107 /* If any additions trigger, we will update flags. */ 1108 if (mask & 0x1111) { 1109 update_flags = true; 1110 } 1111 1112 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 1113 uint64_t r = carry_in; 1114 r += n[H4(e)]; 1115 r += m[H4(e)] ^ inv; 1116 if (mask & 1) { 1117 carry_in = r >> 32; 1118 } 1119 mergemask(&d[H4(e)], r, mask); 1120 } 1121 1122 if (update_flags) { 1123 /* Store C, clear NZV. 
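         * The carry propagated through the active elements ends up in
         * FPSCR.C; N, Z and V simply read back as zero afterwards.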
*/ 1124 env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK; 1125 env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C; 1126 } 1127 mve_advance_vpt(env); 1128 } 1129 1130 void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm) 1131 { 1132 bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C; 1133 do_vadc(env, vd, vn, vm, 0, carry_in, false); 1134 } 1135 1136 void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm) 1137 { 1138 bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C; 1139 do_vadc(env, vd, vn, vm, -1, carry_in, false); 1140 } 1141 1142 1143 void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm) 1144 { 1145 do_vadc(env, vd, vn, vm, 0, 0, true); 1146 } 1147 1148 void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm) 1149 { 1150 do_vadc(env, vd, vn, vm, -1, 1, true); 1151 } 1152 1153 #define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \ 1154 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \ 1155 { \ 1156 TYPE *d = vd, *n = vn, *m = vm; \ 1157 uint16_t mask = mve_element_mask(env); \ 1158 unsigned e; \ 1159 TYPE r[16 / ESIZE]; \ 1160 /* Calculate all results first to avoid overwriting inputs */ \ 1161 for (e = 0; e < 16 / ESIZE; e++) { \ 1162 if (!(e & 1)) { \ 1163 r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \ 1164 } else { \ 1165 r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \ 1166 } \ 1167 } \ 1168 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1169 mergemask(&d[H##ESIZE(e)], r[e], mask); \ 1170 } \ 1171 mve_advance_vpt(env); \ 1172 } 1173 1174 #define DO_VCADD_ALL(OP, FN0, FN1) \ 1175 DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \ 1176 DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \ 1177 DO_VCADD(OP##w, 4, int32_t, FN0, FN1) 1178 1179 DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD) 1180 DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB) 1181 DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s) 1182 DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s) 1183 1184 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s) 1185 { 1186 if (val > max) { 1187 *s = true; 1188 return max; 1189 } else if (val < min) { 1190 *s = true; 1191 return min; 1192 } 1193 return val; 1194 } 1195 1196 #define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s) 1197 #define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s) 1198 #define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s) 1199 1200 #define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s) 1201 #define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s) 1202 #define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s) 1203 1204 #define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s) 1205 #define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s) 1206 #define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s) 1207 1208 #define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s) 1209 #define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s) 1210 #define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s) 1211 1212 /* 1213 * For QDMULH and QRDMULH we simplify "double and shift by esize" into 1214 * "shift by esize-1", adjusting the QRDMULH rounding constant to match. 
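 * For example, the architectural 16-bit QRDMULH result is
 * (2 * n * m + (1 << 15)) >> 16, which DO_QRDMULH_H computes
 * identically as (n * m + (1 << 14)) >> 15.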
1215 */ 1216 #define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \ 1217 INT8_MIN, INT8_MAX, s) 1218 #define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \ 1219 INT16_MIN, INT16_MAX, s) 1220 #define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \ 1221 INT32_MIN, INT32_MAX, s) 1222 1223 #define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \ 1224 INT8_MIN, INT8_MAX, s) 1225 #define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \ 1226 INT16_MIN, INT16_MAX, s) 1227 #define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \ 1228 INT32_MIN, INT32_MAX, s) 1229 1230 DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B) 1231 DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H) 1232 DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W) 1233 1234 DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B) 1235 DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H) 1236 DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W) 1237 1238 DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B) 1239 DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H) 1240 DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W) 1241 DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B) 1242 DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H) 1243 DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W) 1244 1245 DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B) 1246 DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H) 1247 DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W) 1248 DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B) 1249 DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H) 1250 DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W) 1251 1252 /* 1253 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs() 1254 * and friends wanting a uint32_t* sat and our needing a bool*. 1255 */ 1256 #define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \ 1257 ({ \ 1258 uint32_t su32 = 0; \ 1259 typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \ 1260 if (su32) { \ 1261 *satp = true; \ 1262 } \ 1263 r; \ 1264 }) 1265 1266 #define DO_SQSHL_OP(N, M, satp) \ 1267 WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp) 1268 #define DO_UQSHL_OP(N, M, satp) \ 1269 WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp) 1270 #define DO_SQRSHL_OP(N, M, satp) \ 1271 WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp) 1272 #define DO_UQRSHL_OP(N, M, satp) \ 1273 WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp) 1274 #define DO_SUQSHL_OP(N, M, satp) \ 1275 WRAP_QRSHL_HELPER(do_suqrshl_bhs, N, M, false, satp) 1276 1277 DO_2OP_SAT_S(vqshls, DO_SQSHL_OP) 1278 DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP) 1279 DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP) 1280 DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP) 1281 1282 /* 1283 * Multiply add dual returning high half 1284 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of 1285 * whether to add the rounding constant, and the pointer to the 1286 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant", 1287 * saturate to twice the input size and return the high half; or 1288 * (A * B - C * D) etc for VQDMLSDH. 
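 * Concretely, only the lanes with (e & 1) == XCHG are written: the plain
 * forms compute n[e] * m[e] + n[e + 1] * m[e + 1] (a subtraction for
 * VQDMLSDH) into the even lanes, and the 'x' forms fill the odd lanes
 * with the operands paired the other way round.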
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN)                \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn,   \
                                void *vm)                               \
    {                                                                   \
        TYPE *d = vd, *n = vn, *m = vm;                                 \
        uint16_t mask = mve_element_mask(env);                          \
        unsigned e;                                                     \
        bool qc = false;                                                \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {              \
            bool sat = false;                                           \
            if ((e & 1) == XCHG) {                                      \
                TYPE r = FN(n[H##ESIZE(e)],                             \
                            m[H##ESIZE(e - XCHG)],                      \
                            n[H##ESIZE(e + (1 - 2 * XCHG))],            \
                            m[H##ESIZE(e + (1 - XCHG))],                \
                            ROUND, &sat);                               \
                mergemask(&d[H##ESIZE(e)], r, mask);                    \
                qc |= sat & mask & 1;                                   \
            }                                                           \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}

static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ?
INT32_MAX : INT32_MIN; 1383 } 1384 return r >> 32; 1385 } 1386 1387 DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b) 1388 DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h) 1389 DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w) 1390 DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b) 1391 DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h) 1392 DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w) 1393 1394 DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b) 1395 DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h) 1396 DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w) 1397 DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b) 1398 DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h) 1399 DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w) 1400 1401 DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b) 1402 DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h) 1403 DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w) 1404 DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b) 1405 DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h) 1406 DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w) 1407 1408 DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b) 1409 DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h) 1410 DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w) 1411 DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b) 1412 DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h) 1413 DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w) 1414 1415 #define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \ 1416 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1417 uint32_t rm) \ 1418 { \ 1419 TYPE *d = vd, *n = vn; \ 1420 TYPE m = rm; \ 1421 uint16_t mask = mve_element_mask(env); \ 1422 unsigned e; \ 1423 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1424 mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \ 1425 } \ 1426 mve_advance_vpt(env); \ 1427 } 1428 1429 #define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \ 1430 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1431 uint32_t rm) \ 1432 { \ 1433 TYPE *d = vd, *n = vn; \ 1434 TYPE m = rm; \ 1435 uint16_t mask = mve_element_mask(env); \ 1436 unsigned e; \ 1437 bool qc = false; \ 1438 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1439 bool sat = false; \ 1440 mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \ 1441 mask); \ 1442 qc |= sat & mask & 1; \ 1443 } \ 1444 if (qc) { \ 1445 env->vfp.qc[0] = qc; \ 1446 } \ 1447 mve_advance_vpt(env); \ 1448 } 1449 1450 /* "accumulating" version where FN takes d as well as n and m */ 1451 #define DO_2OP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 1452 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1453 uint32_t rm) \ 1454 { \ 1455 TYPE *d = vd, *n = vn; \ 1456 TYPE m = rm; \ 1457 uint16_t mask = mve_element_mask(env); \ 1458 unsigned e; \ 1459 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1460 mergemask(&d[H##ESIZE(e)], \ 1461 FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m), mask); \ 1462 } \ 1463 mve_advance_vpt(env); \ 1464 } 1465 1466 #define DO_2OP_SAT_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 1467 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1468 uint32_t rm) \ 1469 { \ 1470 TYPE *d = vd, *n = vn; \ 1471 TYPE m = rm; \ 1472 uint16_t mask = mve_element_mask(env); \ 1473 unsigned e; \ 1474 bool qc = false; \ 1475 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 
            bool sat = false;                                           \
            mergemask(&d[H##ESIZE(e)],                                  \
                      FN(d[H##ESIZE(e)], n[H##ESIZE(e)], m, &sat),      \
                      mask);                                            \
            qc |= sat & mask & 1;                                       \
        }                                                               \
        if (qc) {                                                       \
            env->vfp.qc[0] = qc;                                        \
        }                                                               \
        mve_advance_vpt(env);                                           \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN)        \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN)       \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN)                 \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN)         \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN)        \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)

#define DO_2OP_ACC_SCALAR_U(OP, FN)             \
    DO_2OP_ACC_SCALAR(OP##b, 1, uint8_t, FN)    \
    DO_2OP_ACC_SCALAR(OP##h, 2, uint16_t, FN)   \
    DO_2OP_ACC_SCALAR(OP##w, 4, uint32_t, FN)

DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)

static int8_t do_vqdmlah_b(int8_t a, int8_t b, int8_t c, int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 8) + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlah_h(int16_t a, int16_t b, int16_t c,
                            int round, bool *sat)
{
    int64_t r = (int64_t)a * b * 2 + ((int64_t)c << 16) + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlah_w(int32_t a, int32_t b, int32_t c,
                            int round, bool *sat)
{
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
1557 * So we add half the rounding constant and half the "c << esize" 1558 * before doubling rather than adding the rounding constant after 1559 * the doubling. 1560 */ 1561 int64_t m1 = (int64_t)a * b; 1562 int64_t m2 = (int64_t)c << 31; 1563 int64_t r; 1564 if (sadd64_overflow(m1, m2, &r) || 1565 sadd64_overflow(r, (round << 30), &r) || 1566 sadd64_overflow(r, r, &r)) { 1567 *sat = true; 1568 return r < 0 ? INT32_MAX : INT32_MIN; 1569 } 1570 return r >> 32; 1571 } 1572 1573 /* 1574 * The *MLAH insns are vector * scalar + vector; 1575 * the *MLASH insns are vector * vector + scalar 1576 */ 1577 #define DO_VQDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 0, S) 1578 #define DO_VQDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 0, S) 1579 #define DO_VQDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 0, S) 1580 #define DO_VQRDMLAH_B(D, N, M, S) do_vqdmlah_b(N, M, D, 1, S) 1581 #define DO_VQRDMLAH_H(D, N, M, S) do_vqdmlah_h(N, M, D, 1, S) 1582 #define DO_VQRDMLAH_W(D, N, M, S) do_vqdmlah_w(N, M, D, 1, S) 1583 1584 #define DO_VQDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 0, S) 1585 #define DO_VQDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 0, S) 1586 #define DO_VQDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 0, S) 1587 #define DO_VQRDMLASH_B(D, N, M, S) do_vqdmlah_b(N, D, M, 1, S) 1588 #define DO_VQRDMLASH_H(D, N, M, S) do_vqdmlah_h(N, D, M, 1, S) 1589 #define DO_VQRDMLASH_W(D, N, M, S) do_vqdmlah_w(N, D, M, 1, S) 1590 1591 DO_2OP_SAT_ACC_SCALAR(vqdmlahb, 1, int8_t, DO_VQDMLAH_B) 1592 DO_2OP_SAT_ACC_SCALAR(vqdmlahh, 2, int16_t, DO_VQDMLAH_H) 1593 DO_2OP_SAT_ACC_SCALAR(vqdmlahw, 4, int32_t, DO_VQDMLAH_W) 1594 DO_2OP_SAT_ACC_SCALAR(vqrdmlahb, 1, int8_t, DO_VQRDMLAH_B) 1595 DO_2OP_SAT_ACC_SCALAR(vqrdmlahh, 2, int16_t, DO_VQRDMLAH_H) 1596 DO_2OP_SAT_ACC_SCALAR(vqrdmlahw, 4, int32_t, DO_VQRDMLAH_W) 1597 1598 DO_2OP_SAT_ACC_SCALAR(vqdmlashb, 1, int8_t, DO_VQDMLASH_B) 1599 DO_2OP_SAT_ACC_SCALAR(vqdmlashh, 2, int16_t, DO_VQDMLASH_H) 1600 DO_2OP_SAT_ACC_SCALAR(vqdmlashw, 4, int32_t, DO_VQDMLASH_W) 1601 DO_2OP_SAT_ACC_SCALAR(vqrdmlashb, 1, int8_t, DO_VQRDMLASH_B) 1602 DO_2OP_SAT_ACC_SCALAR(vqrdmlashh, 2, int16_t, DO_VQRDMLASH_H) 1603 DO_2OP_SAT_ACC_SCALAR(vqrdmlashw, 4, int32_t, DO_VQRDMLASH_W) 1604 1605 /* Vector by scalar plus vector */ 1606 #define DO_VMLA(D, N, M) ((N) * (M) + (D)) 1607 1608 DO_2OP_ACC_SCALAR_U(vmla, DO_VMLA) 1609 1610 /* Vector by vector plus scalar */ 1611 #define DO_VMLAS(D, N, M) ((N) * (D) + (M)) 1612 1613 DO_2OP_ACC_SCALAR_U(vmlas, DO_VMLAS) 1614 1615 /* 1616 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the 1617 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type. 1618 * SATMASK specifies which bits of the predicate mask matter for determining 1619 * whether to propagate a saturation indication into FPSCR.QC -- for 1620 * the 16x16->32 case we must check only the bit corresponding to the T or B 1621 * half that we used, but for the 32x32->64 case we propagate if the mask 1622 * bit is set for either half. 
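 * Hence SATMASK16B is bit 0 and SATMASK16T is bit 2 (the predicate bit
 * of whichever 16-bit half was consumed), while SATMASK32 is bits 0 and
 * 4 so that either active half of the 64-bit result lane can set QC.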
1623 */ 1624 #define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \ 1625 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1626 uint32_t rm) \ 1627 { \ 1628 LTYPE *d = vd; \ 1629 TYPE *n = vn; \ 1630 TYPE m = rm; \ 1631 uint16_t mask = mve_element_mask(env); \ 1632 unsigned le; \ 1633 bool qc = false; \ 1634 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1635 bool sat = false; \ 1636 LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \ 1637 mergemask(&d[H##LESIZE(le)], r, mask); \ 1638 qc |= sat && (mask & SATMASK); \ 1639 } \ 1640 if (qc) { \ 1641 env->vfp.qc[0] = qc; \ 1642 } \ 1643 mve_advance_vpt(env); \ 1644 } 1645 1646 static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat) 1647 { 1648 int64_t r = ((int64_t)n * m) * 2; 1649 return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat); 1650 } 1651 1652 static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat) 1653 { 1654 /* The multiply can't overflow, but the doubling might */ 1655 int64_t r = (int64_t)n * m; 1656 if (r > INT64_MAX / 2) { 1657 *sat = true; 1658 return INT64_MAX; 1659 } else if (r < INT64_MIN / 2) { 1660 *sat = true; 1661 return INT64_MIN; 1662 } else { 1663 return r * 2; 1664 } 1665 } 1666 1667 #define SATMASK16B 1 1668 #define SATMASK16T (1 << 2) 1669 #define SATMASK32 ((1 << 4) | 1) 1670 1671 DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \ 1672 do_qdmullh, SATMASK16B) 1673 DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \ 1674 do_qdmullw, SATMASK32) 1675 DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \ 1676 do_qdmullh, SATMASK16T) 1677 DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \ 1678 do_qdmullw, SATMASK32) 1679 1680 /* 1681 * Long saturating ops 1682 */ 1683 #define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \ 1684 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \ 1685 void *vm) \ 1686 { \ 1687 LTYPE *d = vd; \ 1688 TYPE *n = vn, *m = vm; \ 1689 uint16_t mask = mve_element_mask(env); \ 1690 unsigned le; \ 1691 bool qc = false; \ 1692 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 1693 bool sat = false; \ 1694 LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \ 1695 LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \ 1696 mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \ 1697 qc |= sat && (mask & SATMASK); \ 1698 } \ 1699 if (qc) { \ 1700 env->vfp.qc[0] = qc; \ 1701 } \ 1702 mve_advance_vpt(env); \ 1703 } 1704 1705 DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B) 1706 DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32) 1707 DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T) 1708 DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32) 1709 1710 static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m) 1711 { 1712 m &= 0xff; 1713 if (m == 0) { 1714 return 0; 1715 } 1716 n = revbit8(n); 1717 if (m < 8) { 1718 n >>= 8 - m; 1719 } 1720 return n; 1721 } 1722 1723 static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m) 1724 { 1725 m &= 0xff; 1726 if (m == 0) { 1727 return 0; 1728 } 1729 n = revbit16(n); 1730 if (m < 16) { 1731 n >>= 16 - m; 1732 } 1733 return n; 1734 } 1735 1736 static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m) 1737 { 1738 m &= 0xff; 1739 if (m == 0) { 1740 return 0; 1741 } 1742 n = revbit32(n); 1743 if (m < 32) { 1744 n >>= 32 - m; 1745 } 1746 return n; 1747 } 1748 1749 DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb) 1750 
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh) 1751 DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw) 1752 1753 /* 1754 * Multiply add long dual accumulate ops. 1755 */ 1756 #define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \ 1757 uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1758 void *vm, uint64_t a) \ 1759 { \ 1760 uint16_t mask = mve_element_mask(env); \ 1761 unsigned e; \ 1762 TYPE *n = vn, *m = vm; \ 1763 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1764 if (mask & 1) { \ 1765 if (e & 1) { \ 1766 a ODDACC \ 1767 (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \ 1768 } else { \ 1769 a EVENACC \ 1770 (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \ 1771 } \ 1772 } \ 1773 } \ 1774 mve_advance_vpt(env); \ 1775 return a; \ 1776 } 1777 1778 DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=) 1779 DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=) 1780 DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=) 1781 DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=) 1782 1783 DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=) 1784 DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=) 1785 1786 DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=) 1787 DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=) 1788 DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=) 1789 DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=) 1790 1791 /* 1792 * Multiply add dual accumulate ops 1793 */ 1794 #define DO_DAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \ 1795 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1796 void *vm, uint32_t a) \ 1797 { \ 1798 uint16_t mask = mve_element_mask(env); \ 1799 unsigned e; \ 1800 TYPE *n = vn, *m = vm; \ 1801 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1802 if (mask & 1) { \ 1803 if (e & 1) { \ 1804 a ODDACC \ 1805 n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \ 1806 } else { \ 1807 a EVENACC \ 1808 n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \ 1809 } \ 1810 } \ 1811 } \ 1812 mve_advance_vpt(env); \ 1813 return a; \ 1814 } 1815 1816 #define DO_DAV_S(INSN, XCHG, EVENACC, ODDACC) \ 1817 DO_DAV(INSN##b, 1, int8_t, XCHG, EVENACC, ODDACC) \ 1818 DO_DAV(INSN##h, 2, int16_t, XCHG, EVENACC, ODDACC) \ 1819 DO_DAV(INSN##w, 4, int32_t, XCHG, EVENACC, ODDACC) 1820 1821 #define DO_DAV_U(INSN, XCHG, EVENACC, ODDACC) \ 1822 DO_DAV(INSN##b, 1, uint8_t, XCHG, EVENACC, ODDACC) \ 1823 DO_DAV(INSN##h, 2, uint16_t, XCHG, EVENACC, ODDACC) \ 1824 DO_DAV(INSN##w, 4, uint32_t, XCHG, EVENACC, ODDACC) 1825 1826 DO_DAV_S(vmladavs, false, +=, +=) 1827 DO_DAV_U(vmladavu, false, +=, +=) 1828 DO_DAV_S(vmlsdav, false, +=, -=) 1829 DO_DAV_S(vmladavsx, true, +=, +=) 1830 DO_DAV_S(vmlsdavx, true, +=, -=) 1831 1832 /* 1833 * Rounding multiply add long dual accumulate high. In the pseudocode 1834 * this is implemented with a 72-bit internal accumulator value of which 1835 * the top 64 bits are returned. We optimize this to avoid having to 1836 * use 128-bit arithmetic -- we can do this because the 74-bit accumulator 1837 * is squashed back into 64-bits after each beat. 
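 * (Informally: squashing the wide value (acc << 8) + prod back into
 * 64 bits with rounding is ((acc << 8) + prod + (1 << 7)) >> 8, which
 * is acc + (prod >> 8) + ((prod >> 7) & 1); so each beat below simply
 * adds the rounded (prod >> 8) to a plain 64-bit accumulator.)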
1838 */ 1839 #define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \ 1840 uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1841 void *vm, uint64_t a) \ 1842 { \ 1843 uint16_t mask = mve_element_mask(env); \ 1844 unsigned e; \ 1845 TYPE *n = vn, *m = vm; \ 1846 for (e = 0; e < 16 / 4; e++, mask >>= 4) { \ 1847 if (mask & 1) { \ 1848 LTYPE mul; \ 1849 if (e & 1) { \ 1850 mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \ 1851 if (SUB) { \ 1852 mul = -mul; \ 1853 } \ 1854 } else { \ 1855 mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \ 1856 } \ 1857 mul = (mul >> 8) + ((mul >> 7) & 1); \ 1858 a += mul; \ 1859 } \ 1860 } \ 1861 mve_advance_vpt(env); \ 1862 return a; \ 1863 } 1864 1865 DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false) 1866 DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false) 1867 1868 DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false) 1869 1870 DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true) 1871 DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true) 1872 1873 /* Vector add across vector */ 1874 #define DO_VADDV(OP, ESIZE, TYPE) \ 1875 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 1876 uint32_t ra) \ 1877 { \ 1878 uint16_t mask = mve_element_mask(env); \ 1879 unsigned e; \ 1880 TYPE *m = vm; \ 1881 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1882 if (mask & 1) { \ 1883 ra += m[H##ESIZE(e)]; \ 1884 } \ 1885 } \ 1886 mve_advance_vpt(env); \ 1887 return ra; \ 1888 } \ 1889 1890 DO_VADDV(vaddvsb, 1, int8_t) 1891 DO_VADDV(vaddvsh, 2, int16_t) 1892 DO_VADDV(vaddvsw, 4, int32_t) 1893 DO_VADDV(vaddvub, 1, uint8_t) 1894 DO_VADDV(vaddvuh, 2, uint16_t) 1895 DO_VADDV(vaddvuw, 4, uint32_t) 1896 1897 /* 1898 * Vector max/min across vector. Unlike VADDV, we must 1899 * read ra as the element size, not its full width. 1900 * We work with int64_t internally for simplicity. 1901 */ 1902 #define DO_VMAXMINV(OP, ESIZE, TYPE, RATYPE, FN) \ 1903 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 1904 uint32_t ra_in) \ 1905 { \ 1906 uint16_t mask = mve_element_mask(env); \ 1907 unsigned e; \ 1908 TYPE *m = vm; \ 1909 int64_t ra = (RATYPE)ra_in; \ 1910 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1911 if (mask & 1) { \ 1912 ra = FN(ra, m[H##ESIZE(e)]); \ 1913 } \ 1914 } \ 1915 mve_advance_vpt(env); \ 1916 return ra; \ 1917 } \ 1918 1919 #define DO_VMAXMINV_U(INSN, FN) \ 1920 DO_VMAXMINV(INSN##b, 1, uint8_t, uint8_t, FN) \ 1921 DO_VMAXMINV(INSN##h, 2, uint16_t, uint16_t, FN) \ 1922 DO_VMAXMINV(INSN##w, 4, uint32_t, uint32_t, FN) 1923 #define DO_VMAXMINV_S(INSN, FN) \ 1924 DO_VMAXMINV(INSN##b, 1, int8_t, int8_t, FN) \ 1925 DO_VMAXMINV(INSN##h, 2, int16_t, int16_t, FN) \ 1926 DO_VMAXMINV(INSN##w, 4, int32_t, int32_t, FN) 1927 1928 /* 1929 * Helpers for max and min of absolute values across vector: 1930 * note that we only take the absolute value of 'm', not 'n' 1931 */ 1932 static int64_t do_maxa(int64_t n, int64_t m) 1933 { 1934 if (m < 0) { 1935 m = -m; 1936 } 1937 return MAX(n, m); 1938 } 1939 1940 static int64_t do_mina(int64_t n, int64_t m) 1941 { 1942 if (m < 0) { 1943 m = -m; 1944 } 1945 return MIN(n, m); 1946 } 1947 1948 DO_VMAXMINV_S(vmaxvs, DO_MAX) 1949 DO_VMAXMINV_U(vmaxvu, DO_MAX) 1950 DO_VMAXMINV_S(vminvs, DO_MIN) 1951 DO_VMAXMINV_U(vminvu, DO_MIN) 1952 /* 1953 * VMAXAV, VMINAV treat the general purpose input as unsigned 1954 * and the vector elements as signed. 
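 * (That is why the instantiations below pair a signed TYPE for the
 * elements with an unsigned RATYPE for ra; the int64_t working value
 * is wide enough to hold either range.)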
1955 */ 1956 DO_VMAXMINV(vmaxavb, 1, int8_t, uint8_t, do_maxa) 1957 DO_VMAXMINV(vmaxavh, 2, int16_t, uint16_t, do_maxa) 1958 DO_VMAXMINV(vmaxavw, 4, int32_t, uint32_t, do_maxa) 1959 DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina) 1960 DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina) 1961 DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina) 1962 1963 #define DO_VABAV(OP, ESIZE, TYPE) \ 1964 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 1965 void *vm, uint32_t ra) \ 1966 { \ 1967 uint16_t mask = mve_element_mask(env); \ 1968 unsigned e; \ 1969 TYPE *m = vm, *n = vn; \ 1970 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 1971 if (mask & 1) { \ 1972 int64_t n0 = n[H##ESIZE(e)]; \ 1973 int64_t m0 = m[H##ESIZE(e)]; \ 1974 uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \ 1975 ra += r; \ 1976 } \ 1977 } \ 1978 mve_advance_vpt(env); \ 1979 return ra; \ 1980 } 1981 1982 DO_VABAV(vabavsb, 1, int8_t) 1983 DO_VABAV(vabavsh, 2, int16_t) 1984 DO_VABAV(vabavsw, 4, int32_t) 1985 DO_VABAV(vabavub, 1, uint8_t) 1986 DO_VABAV(vabavuh, 2, uint16_t) 1987 DO_VABAV(vabavuw, 4, uint32_t) 1988 1989 #define DO_VADDLV(OP, TYPE, LTYPE) \ 1990 uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 1991 uint64_t ra) \ 1992 { \ 1993 uint16_t mask = mve_element_mask(env); \ 1994 unsigned e; \ 1995 TYPE *m = vm; \ 1996 for (e = 0; e < 16 / 4; e++, mask >>= 4) { \ 1997 if (mask & 1) { \ 1998 ra += (LTYPE)m[H4(e)]; \ 1999 } \ 2000 } \ 2001 mve_advance_vpt(env); \ 2002 return ra; \ 2003 } \ 2004 2005 DO_VADDLV(vaddlv_s, int32_t, int64_t) 2006 DO_VADDLV(vaddlv_u, uint32_t, uint64_t) 2007 2008 /* Shifts by immediate */ 2009 #define DO_2SHIFT(OP, ESIZE, TYPE, FN) \ 2010 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2011 void *vm, uint32_t shift) \ 2012 { \ 2013 TYPE *d = vd, *m = vm; \ 2014 uint16_t mask = mve_element_mask(env); \ 2015 unsigned e; \ 2016 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2017 mergemask(&d[H##ESIZE(e)], \ 2018 FN(m[H##ESIZE(e)], shift), mask); \ 2019 } \ 2020 mve_advance_vpt(env); \ 2021 } 2022 2023 #define DO_2SHIFT_SAT(OP, ESIZE, TYPE, FN) \ 2024 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2025 void *vm, uint32_t shift) \ 2026 { \ 2027 TYPE *d = vd, *m = vm; \ 2028 uint16_t mask = mve_element_mask(env); \ 2029 unsigned e; \ 2030 bool qc = false; \ 2031 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2032 bool sat = false; \ 2033 mergemask(&d[H##ESIZE(e)], \ 2034 FN(m[H##ESIZE(e)], shift, &sat), mask); \ 2035 qc |= sat & mask & 1; \ 2036 } \ 2037 if (qc) { \ 2038 env->vfp.qc[0] = qc; \ 2039 } \ 2040 mve_advance_vpt(env); \ 2041 } 2042 2043 /* provide unsigned 2-op shift helpers for all sizes */ 2044 #define DO_2SHIFT_U(OP, FN) \ 2045 DO_2SHIFT(OP##b, 1, uint8_t, FN) \ 2046 DO_2SHIFT(OP##h, 2, uint16_t, FN) \ 2047 DO_2SHIFT(OP##w, 4, uint32_t, FN) 2048 #define DO_2SHIFT_S(OP, FN) \ 2049 DO_2SHIFT(OP##b, 1, int8_t, FN) \ 2050 DO_2SHIFT(OP##h, 2, int16_t, FN) \ 2051 DO_2SHIFT(OP##w, 4, int32_t, FN) 2052 2053 #define DO_2SHIFT_SAT_U(OP, FN) \ 2054 DO_2SHIFT_SAT(OP##b, 1, uint8_t, FN) \ 2055 DO_2SHIFT_SAT(OP##h, 2, uint16_t, FN) \ 2056 DO_2SHIFT_SAT(OP##w, 4, uint32_t, FN) 2057 #define DO_2SHIFT_SAT_S(OP, FN) \ 2058 DO_2SHIFT_SAT(OP##b, 1, int8_t, FN) \ 2059 DO_2SHIFT_SAT(OP##h, 2, int16_t, FN) \ 2060 DO_2SHIFT_SAT(OP##w, 4, int32_t, FN) 2061 2062 DO_2SHIFT_U(vshli_u, DO_VSHLU) 2063 DO_2SHIFT_S(vshli_s, DO_VSHLS) 2064 DO_2SHIFT_SAT_U(vqshli_u, DO_UQSHL_OP) 2065 DO_2SHIFT_SAT_S(vqshli_s, DO_SQSHL_OP) 2066 DO_2SHIFT_SAT_S(vqshlui_s, 
DO_SUQSHL_OP) 2067 DO_2SHIFT_U(vrshli_u, DO_VRSHLU) 2068 DO_2SHIFT_S(vrshli_s, DO_VRSHLS) 2069 DO_2SHIFT_SAT_U(vqrshli_u, DO_UQRSHL_OP) 2070 DO_2SHIFT_SAT_S(vqrshli_s, DO_SQRSHL_OP) 2071 2072 /* Shift-and-insert; we always work with 64 bits at a time */ 2073 #define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN) \ 2074 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2075 void *vm, uint32_t shift) \ 2076 { \ 2077 uint64_t *d = vd, *m = vm; \ 2078 uint16_t mask; \ 2079 uint64_t shiftmask; \ 2080 unsigned e; \ 2081 if (shift == ESIZE * 8) { \ 2082 /* \ 2083 * Only VSRI can shift by <dt>; it should mean "don't \ 2084 * update the destination". The generic logic can't handle \ 2085 * this because it would try to shift by an out-of-range \ 2086 * amount, so special case it here. \ 2087 */ \ 2088 goto done; \ 2089 } \ 2090 assert(shift < ESIZE * 8); \ 2091 mask = mve_element_mask(env); \ 2092 /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */ \ 2093 shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift)); \ 2094 for (e = 0; e < 16 / 8; e++, mask >>= 8) { \ 2095 uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) | \ 2096 (d[H8(e)] & ~shiftmask); \ 2097 mergemask(&d[H8(e)], r, mask); \ 2098 } \ 2099 done: \ 2100 mve_advance_vpt(env); \ 2101 } 2102 2103 #define DO_SHL(N, SHIFT) ((N) << (SHIFT)) 2104 #define DO_SHR(N, SHIFT) ((N) >> (SHIFT)) 2105 #define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT)) 2106 #define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT)) 2107 2108 DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK) 2109 DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK) 2110 DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK) 2111 DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK) 2112 DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK) 2113 DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK) 2114 2115 /* 2116 * Long shifts taking half-sized inputs from top or bottom of the input 2117 * vector and producing a double-width result. ESIZE, TYPE are for 2118 * the input, and LESIZE, LTYPE for the output. 2119 * Unlike the normal shift helpers, we do not handle negative shift counts, 2120 * because the long shift is strictly left-only. 2121 */ 2122 #define DO_VSHLL(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ 2123 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2124 void *vm, uint32_t shift) \ 2125 { \ 2126 LTYPE *d = vd; \ 2127 TYPE *m = vm; \ 2128 uint16_t mask = mve_element_mask(env); \ 2129 unsigned le; \ 2130 assert(shift <= 16); \ 2131 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2132 LTYPE r = (LTYPE)m[H##ESIZE(le * 2 + TOP)] << shift; \ 2133 mergemask(&d[H##LESIZE(le)], r, mask); \ 2134 } \ 2135 mve_advance_vpt(env); \ 2136 } 2137 2138 #define DO_VSHLL_ALL(OP, TOP) \ 2139 DO_VSHLL(OP##sb, TOP, 1, int8_t, 2, int16_t) \ 2140 DO_VSHLL(OP##ub, TOP, 1, uint8_t, 2, uint16_t) \ 2141 DO_VSHLL(OP##sh, TOP, 2, int16_t, 4, int32_t) \ 2142 DO_VSHLL(OP##uh, TOP, 2, uint16_t, 4, uint32_t) \ 2143 2144 DO_VSHLL_ALL(vshllb, false) 2145 DO_VSHLL_ALL(vshllt, true) 2146 2147 /* 2148 * Narrowing right shifts, taking a double sized input, shifting it 2149 * and putting the result in either the top or bottom half of the output. 2150 * ESIZE, TYPE are the output, and LESIZE, LTYPE the input. 
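 * (The "mask >>= ESIZE * TOP" in the helpers below lines the predicate
 * mask up with the output lanes: the _top_ variants write the odd
 * ESIZE-sized lanes, whose predicate bits sit ESIZE bits higher.)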
2151 */ 2152 #define DO_VSHRN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 2153 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2154 void *vm, uint32_t shift) \ 2155 { \ 2156 LTYPE *m = vm; \ 2157 TYPE *d = vd; \ 2158 uint16_t mask = mve_element_mask(env); \ 2159 unsigned le; \ 2160 mask >>= ESIZE * TOP; \ 2161 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2162 TYPE r = FN(m[H##LESIZE(le)], shift); \ 2163 mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 2164 } \ 2165 mve_advance_vpt(env); \ 2166 } 2167 2168 #define DO_VSHRN_ALL(OP, FN) \ 2169 DO_VSHRN(OP##bb, false, 1, uint8_t, 2, uint16_t, FN) \ 2170 DO_VSHRN(OP##bh, false, 2, uint16_t, 4, uint32_t, FN) \ 2171 DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \ 2172 DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN) 2173 2174 static inline uint64_t do_urshr(uint64_t x, unsigned sh) 2175 { 2176 if (likely(sh < 64)) { 2177 return (x >> sh) + ((x >> (sh - 1)) & 1); 2178 } else if (sh == 64) { 2179 return x >> 63; 2180 } else { 2181 return 0; 2182 } 2183 } 2184 2185 static inline int64_t do_srshr(int64_t x, unsigned sh) 2186 { 2187 if (likely(sh < 64)) { 2188 return (x >> sh) + ((x >> (sh - 1)) & 1); 2189 } else { 2190 /* Rounding the sign bit always produces 0. */ 2191 return 0; 2192 } 2193 } 2194 2195 DO_VSHRN_ALL(vshrn, DO_SHR) 2196 DO_VSHRN_ALL(vrshrn, do_urshr) 2197 2198 static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max, 2199 bool *satp) 2200 { 2201 if (val > max) { 2202 *satp = true; 2203 return max; 2204 } else if (val < min) { 2205 *satp = true; 2206 return min; 2207 } else { 2208 return val; 2209 } 2210 } 2211 2212 /* Saturating narrowing right shifts */ 2213 #define DO_VSHRN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 2214 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, \ 2215 void *vm, uint32_t shift) \ 2216 { \ 2217 LTYPE *m = vm; \ 2218 TYPE *d = vd; \ 2219 uint16_t mask = mve_element_mask(env); \ 2220 bool qc = false; \ 2221 unsigned le; \ 2222 mask >>= ESIZE * TOP; \ 2223 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2224 bool sat = false; \ 2225 TYPE r = FN(m[H##LESIZE(le)], shift, &sat); \ 2226 mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 2227 qc |= sat & mask & 1; \ 2228 } \ 2229 if (qc) { \ 2230 env->vfp.qc[0] = qc; \ 2231 } \ 2232 mve_advance_vpt(env); \ 2233 } 2234 2235 #define DO_VSHRN_SAT_UB(BOP, TOP, FN) \ 2236 DO_VSHRN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ 2237 DO_VSHRN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) 2238 2239 #define DO_VSHRN_SAT_UH(BOP, TOP, FN) \ 2240 DO_VSHRN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ 2241 DO_VSHRN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) 2242 2243 #define DO_VSHRN_SAT_SB(BOP, TOP, FN) \ 2244 DO_VSHRN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ 2245 DO_VSHRN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) 2246 2247 #define DO_VSHRN_SAT_SH(BOP, TOP, FN) \ 2248 DO_VSHRN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ 2249 DO_VSHRN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) 2250 2251 #define DO_SHRN_SB(N, M, SATP) \ 2252 do_sat_bhs((int64_t)(N) >> (M), INT8_MIN, INT8_MAX, SATP) 2253 #define DO_SHRN_UB(N, M, SATP) \ 2254 do_sat_bhs((uint64_t)(N) >> (M), 0, UINT8_MAX, SATP) 2255 #define DO_SHRUN_B(N, M, SATP) \ 2256 do_sat_bhs((int64_t)(N) >> (M), 0, UINT8_MAX, SATP) 2257 2258 #define DO_SHRN_SH(N, M, SATP) \ 2259 do_sat_bhs((int64_t)(N) >> (M), INT16_MIN, INT16_MAX, SATP) 2260 #define DO_SHRN_UH(N, M, SATP) \ 2261 do_sat_bhs((uint64_t)(N) >> (M), 0, UINT16_MAX, SATP) 2262 #define DO_SHRUN_H(N, M, SATP) \ 
2263 do_sat_bhs((int64_t)(N) >> (M), 0, UINT16_MAX, SATP) 2264 2265 #define DO_RSHRN_SB(N, M, SATP) \ 2266 do_sat_bhs(do_srshr(N, M), INT8_MIN, INT8_MAX, SATP) 2267 #define DO_RSHRN_UB(N, M, SATP) \ 2268 do_sat_bhs(do_urshr(N, M), 0, UINT8_MAX, SATP) 2269 #define DO_RSHRUN_B(N, M, SATP) \ 2270 do_sat_bhs(do_srshr(N, M), 0, UINT8_MAX, SATP) 2271 2272 #define DO_RSHRN_SH(N, M, SATP) \ 2273 do_sat_bhs(do_srshr(N, M), INT16_MIN, INT16_MAX, SATP) 2274 #define DO_RSHRN_UH(N, M, SATP) \ 2275 do_sat_bhs(do_urshr(N, M), 0, UINT16_MAX, SATP) 2276 #define DO_RSHRUN_H(N, M, SATP) \ 2277 do_sat_bhs(do_srshr(N, M), 0, UINT16_MAX, SATP) 2278 2279 DO_VSHRN_SAT_SB(vqshrnb_sb, vqshrnt_sb, DO_SHRN_SB) 2280 DO_VSHRN_SAT_SH(vqshrnb_sh, vqshrnt_sh, DO_SHRN_SH) 2281 DO_VSHRN_SAT_UB(vqshrnb_ub, vqshrnt_ub, DO_SHRN_UB) 2282 DO_VSHRN_SAT_UH(vqshrnb_uh, vqshrnt_uh, DO_SHRN_UH) 2283 DO_VSHRN_SAT_SB(vqshrunbb, vqshruntb, DO_SHRUN_B) 2284 DO_VSHRN_SAT_SH(vqshrunbh, vqshrunth, DO_SHRUN_H) 2285 2286 DO_VSHRN_SAT_SB(vqrshrnb_sb, vqrshrnt_sb, DO_RSHRN_SB) 2287 DO_VSHRN_SAT_SH(vqrshrnb_sh, vqrshrnt_sh, DO_RSHRN_SH) 2288 DO_VSHRN_SAT_UB(vqrshrnb_ub, vqrshrnt_ub, DO_RSHRN_UB) 2289 DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH) 2290 DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B) 2291 DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H) 2292 2293 #define DO_VMOVN(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE) \ 2294 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2295 { \ 2296 LTYPE *m = vm; \ 2297 TYPE *d = vd; \ 2298 uint16_t mask = mve_element_mask(env); \ 2299 unsigned le; \ 2300 mask >>= ESIZE * TOP; \ 2301 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2302 mergemask(&d[H##ESIZE(le * 2 + TOP)], \ 2303 m[H##LESIZE(le)], mask); \ 2304 } \ 2305 mve_advance_vpt(env); \ 2306 } 2307 2308 DO_VMOVN(vmovnbb, false, 1, uint8_t, 2, uint16_t) 2309 DO_VMOVN(vmovnbh, false, 2, uint16_t, 4, uint32_t) 2310 DO_VMOVN(vmovntb, true, 1, uint8_t, 2, uint16_t) 2311 DO_VMOVN(vmovnth, true, 2, uint16_t, 4, uint32_t) 2312 2313 #define DO_VMOVN_SAT(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \ 2314 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2315 { \ 2316 LTYPE *m = vm; \ 2317 TYPE *d = vd; \ 2318 uint16_t mask = mve_element_mask(env); \ 2319 bool qc = false; \ 2320 unsigned le; \ 2321 mask >>= ESIZE * TOP; \ 2322 for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \ 2323 bool sat = false; \ 2324 TYPE r = FN(m[H##LESIZE(le)], &sat); \ 2325 mergemask(&d[H##ESIZE(le * 2 + TOP)], r, mask); \ 2326 qc |= sat & mask & 1; \ 2327 } \ 2328 if (qc) { \ 2329 env->vfp.qc[0] = qc; \ 2330 } \ 2331 mve_advance_vpt(env); \ 2332 } 2333 2334 #define DO_VMOVN_SAT_UB(BOP, TOP, FN) \ 2335 DO_VMOVN_SAT(BOP, false, 1, uint8_t, 2, uint16_t, FN) \ 2336 DO_VMOVN_SAT(TOP, true, 1, uint8_t, 2, uint16_t, FN) 2337 2338 #define DO_VMOVN_SAT_UH(BOP, TOP, FN) \ 2339 DO_VMOVN_SAT(BOP, false, 2, uint16_t, 4, uint32_t, FN) \ 2340 DO_VMOVN_SAT(TOP, true, 2, uint16_t, 4, uint32_t, FN) 2341 2342 #define DO_VMOVN_SAT_SB(BOP, TOP, FN) \ 2343 DO_VMOVN_SAT(BOP, false, 1, int8_t, 2, int16_t, FN) \ 2344 DO_VMOVN_SAT(TOP, true, 1, int8_t, 2, int16_t, FN) 2345 2346 #define DO_VMOVN_SAT_SH(BOP, TOP, FN) \ 2347 DO_VMOVN_SAT(BOP, false, 2, int16_t, 4, int32_t, FN) \ 2348 DO_VMOVN_SAT(TOP, true, 2, int16_t, 4, int32_t, FN) 2349 2350 #define DO_VQMOVN_SB(N, SATP) \ 2351 do_sat_bhs((int64_t)(N), INT8_MIN, INT8_MAX, SATP) 2352 #define DO_VQMOVN_UB(N, SATP) \ 2353 do_sat_bhs((uint64_t)(N), 0, UINT8_MAX, SATP) 2354 #define DO_VQMOVUN_B(N, SATP) \ 2355 
do_sat_bhs((int64_t)(N), 0, UINT8_MAX, SATP) 2356 2357 #define DO_VQMOVN_SH(N, SATP) \ 2358 do_sat_bhs((int64_t)(N), INT16_MIN, INT16_MAX, SATP) 2359 #define DO_VQMOVN_UH(N, SATP) \ 2360 do_sat_bhs((uint64_t)(N), 0, UINT16_MAX, SATP) 2361 #define DO_VQMOVUN_H(N, SATP) \ 2362 do_sat_bhs((int64_t)(N), 0, UINT16_MAX, SATP) 2363 2364 DO_VMOVN_SAT_SB(vqmovnbsb, vqmovntsb, DO_VQMOVN_SB) 2365 DO_VMOVN_SAT_SH(vqmovnbsh, vqmovntsh, DO_VQMOVN_SH) 2366 DO_VMOVN_SAT_UB(vqmovnbub, vqmovntub, DO_VQMOVN_UB) 2367 DO_VMOVN_SAT_UH(vqmovnbuh, vqmovntuh, DO_VQMOVN_UH) 2368 DO_VMOVN_SAT_SB(vqmovunbb, vqmovuntb, DO_VQMOVUN_B) 2369 DO_VMOVN_SAT_SH(vqmovunbh, vqmovunth, DO_VQMOVUN_H) 2370 2371 uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm, 2372 uint32_t shift) 2373 { 2374 uint32_t *d = vd; 2375 uint16_t mask = mve_element_mask(env); 2376 unsigned e; 2377 uint32_t r; 2378 2379 /* 2380 * For each 32-bit element, we shift it left, bringing in the 2381 * low 'shift' bits of rdm at the bottom. Bits shifted out at 2382 * the top become the new rdm, if the predicate mask permits. 2383 * The final rdm value is returned to update the register. 2384 * shift == 0 here means "shift by 32 bits". 2385 */ 2386 if (shift == 0) { 2387 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 2388 r = rdm; 2389 if (mask & 1) { 2390 rdm = d[H4(e)]; 2391 } 2392 mergemask(&d[H4(e)], r, mask); 2393 } 2394 } else { 2395 uint32_t shiftmask = MAKE_64BIT_MASK(0, shift); 2396 2397 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 2398 r = (d[H4(e)] << shift) | (rdm & shiftmask); 2399 if (mask & 1) { 2400 rdm = d[H4(e)] >> (32 - shift); 2401 } 2402 mergemask(&d[H4(e)], r, mask); 2403 } 2404 } 2405 mve_advance_vpt(env); 2406 return rdm; 2407 } 2408 2409 uint64_t HELPER(mve_sshrl)(CPUARMState *env, uint64_t n, uint32_t shift) 2410 { 2411 return do_sqrshl_d(n, -(int8_t)shift, false, NULL); 2412 } 2413 2414 uint64_t HELPER(mve_ushll)(CPUARMState *env, uint64_t n, uint32_t shift) 2415 { 2416 return do_uqrshl_d(n, (int8_t)shift, false, NULL); 2417 } 2418 2419 uint64_t HELPER(mve_sqshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2420 { 2421 return do_sqrshl_d(n, (int8_t)shift, false, &env->QF); 2422 } 2423 2424 uint64_t HELPER(mve_uqshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2425 { 2426 return do_uqrshl_d(n, (int8_t)shift, false, &env->QF); 2427 } 2428 2429 uint64_t HELPER(mve_sqrshrl)(CPUARMState *env, uint64_t n, uint32_t shift) 2430 { 2431 return do_sqrshl_d(n, -(int8_t)shift, true, &env->QF); 2432 } 2433 2434 uint64_t HELPER(mve_uqrshll)(CPUARMState *env, uint64_t n, uint32_t shift) 2435 { 2436 return do_uqrshl_d(n, (int8_t)shift, true, &env->QF); 2437 } 2438 2439 /* Operate on 64-bit values, but saturate at 48 bits */ 2440 static inline int64_t do_sqrshl48_d(int64_t src, int64_t shift, 2441 bool round, uint32_t *sat) 2442 { 2443 int64_t val, extval; 2444 2445 if (shift <= -48) { 2446 /* Rounding the sign bit always produces 0. */ 2447 if (round) { 2448 return 0; 2449 } 2450 return src >> 63; 2451 } else if (shift < 0) { 2452 if (round) { 2453 src >>= -shift - 1; 2454 val = (src >> 1) + (src & 1); 2455 } else { 2456 val = src >> -shift; 2457 } 2458 extval = sextract64(val, 0, 48); 2459 if (!sat || val == extval) { 2460 return extval; 2461 } 2462 } else if (shift < 48) { 2463 int64_t extval = sextract64(src << shift, 0, 48); 2464 if (!sat || src == (extval >> shift)) { 2465 return extval; 2466 } 2467 } else if (!sat || src == 0) { 2468 return 0; 2469 } 2470 2471 *sat = 1; 2472 return src >= 0 ? 
MAKE_64BIT_MASK(0, 47) : MAKE_64BIT_MASK(47, 17); 2473 } 2474 2475 /* Operate on 64-bit values, but saturate at 48 bits */ 2476 static inline uint64_t do_uqrshl48_d(uint64_t src, int64_t shift, 2477 bool round, uint32_t *sat) 2478 { 2479 uint64_t val, extval; 2480 2481 if (shift <= -(48 + round)) { 2482 return 0; 2483 } else if (shift < 0) { 2484 if (round) { 2485 val = src >> (-shift - 1); 2486 val = (val >> 1) + (val & 1); 2487 } else { 2488 val = src >> -shift; 2489 } 2490 extval = extract64(val, 0, 48); 2491 if (!sat || val == extval) { 2492 return extval; 2493 } 2494 } else if (shift < 48) { 2495 uint64_t extval = extract64(src << shift, 0, 48); 2496 if (!sat || src == (extval >> shift)) { 2497 return extval; 2498 } 2499 } else if (!sat || src == 0) { 2500 return 0; 2501 } 2502 2503 *sat = 1; 2504 return MAKE_64BIT_MASK(0, 48); 2505 } 2506 2507 uint64_t HELPER(mve_sqrshrl48)(CPUARMState *env, uint64_t n, uint32_t shift) 2508 { 2509 return do_sqrshl48_d(n, -(int8_t)shift, true, &env->QF); 2510 } 2511 2512 uint64_t HELPER(mve_uqrshll48)(CPUARMState *env, uint64_t n, uint32_t shift) 2513 { 2514 return do_uqrshl48_d(n, (int8_t)shift, true, &env->QF); 2515 } 2516 2517 uint32_t HELPER(mve_uqshl)(CPUARMState *env, uint32_t n, uint32_t shift) 2518 { 2519 return do_uqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF); 2520 } 2521 2522 uint32_t HELPER(mve_sqshl)(CPUARMState *env, uint32_t n, uint32_t shift) 2523 { 2524 return do_sqrshl_bhs(n, (int8_t)shift, 32, false, &env->QF); 2525 } 2526 2527 uint32_t HELPER(mve_uqrshl)(CPUARMState *env, uint32_t n, uint32_t shift) 2528 { 2529 return do_uqrshl_bhs(n, (int8_t)shift, 32, true, &env->QF); 2530 } 2531 2532 uint32_t HELPER(mve_sqrshr)(CPUARMState *env, uint32_t n, uint32_t shift) 2533 { 2534 return do_sqrshl_bhs(n, -(int8_t)shift, 32, true, &env->QF); 2535 } 2536 2537 #define DO_VIDUP(OP, ESIZE, TYPE, FN) \ 2538 uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ 2539 uint32_t offset, uint32_t imm) \ 2540 { \ 2541 TYPE *d = vd; \ 2542 uint16_t mask = mve_element_mask(env); \ 2543 unsigned e; \ 2544 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2545 mergemask(&d[H##ESIZE(e)], offset, mask); \ 2546 offset = FN(offset, imm); \ 2547 } \ 2548 mve_advance_vpt(env); \ 2549 return offset; \ 2550 } 2551 2552 #define DO_VIWDUP(OP, ESIZE, TYPE, FN) \ 2553 uint32_t HELPER(mve_##OP)(CPUARMState *env, void *vd, \ 2554 uint32_t offset, uint32_t wrap, \ 2555 uint32_t imm) \ 2556 { \ 2557 TYPE *d = vd; \ 2558 uint16_t mask = mve_element_mask(env); \ 2559 unsigned e; \ 2560 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2561 mergemask(&d[H##ESIZE(e)], offset, mask); \ 2562 offset = FN(offset, wrap, imm); \ 2563 } \ 2564 mve_advance_vpt(env); \ 2565 return offset; \ 2566 } 2567 2568 #define DO_VIDUP_ALL(OP, FN) \ 2569 DO_VIDUP(OP##b, 1, int8_t, FN) \ 2570 DO_VIDUP(OP##h, 2, int16_t, FN) \ 2571 DO_VIDUP(OP##w, 4, int32_t, FN) 2572 2573 #define DO_VIWDUP_ALL(OP, FN) \ 2574 DO_VIWDUP(OP##b, 1, int8_t, FN) \ 2575 DO_VIWDUP(OP##h, 2, int16_t, FN) \ 2576 DO_VIWDUP(OP##w, 4, int32_t, FN) 2577 2578 static uint32_t do_add_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) 2579 { 2580 offset += imm; 2581 if (offset == wrap) { 2582 offset = 0; 2583 } 2584 return offset; 2585 } 2586 2587 static uint32_t do_sub_wrap(uint32_t offset, uint32_t wrap, uint32_t imm) 2588 { 2589 if (offset == 0) { 2590 offset = wrap; 2591 } 2592 offset -= imm; 2593 return offset; 2594 } 2595 2596 DO_VIDUP_ALL(vidup, DO_ADD) 2597 DO_VIWDUP_ALL(viwdup, do_add_wrap) 2598 
DO_VIWDUP_ALL(vdwdup, do_sub_wrap) 2599 2600 /* 2601 * Vector comparison. 2602 * P0 bits for non-executed beats (where eci_mask is 0) are unchanged. 2603 * P0 bits for predicated lanes in executed beats (where mask is 0) are 0. 2604 * P0 bits otherwise are updated with the results of the comparisons. 2605 * We must also keep unchanged the MASK fields at the top of v7m.vpr. 2606 */ 2607 #define DO_VCMP(OP, ESIZE, TYPE, FN) \ 2608 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \ 2609 { \ 2610 TYPE *n = vn, *m = vm; \ 2611 uint16_t mask = mve_element_mask(env); \ 2612 uint16_t eci_mask = mve_eci_mask(env); \ 2613 uint16_t beatpred = 0; \ 2614 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 2615 unsigned e; \ 2616 for (e = 0; e < 16 / ESIZE; e++) { \ 2617 bool r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)]); \ 2618 /* Comparison sets 0/1 bits for each byte in the element */ \ 2619 beatpred |= r * emask; \ 2620 emask <<= ESIZE; \ 2621 } \ 2622 beatpred &= mask; \ 2623 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 2624 (beatpred & eci_mask); \ 2625 mve_advance_vpt(env); \ 2626 } 2627 2628 #define DO_VCMP_SCALAR(OP, ESIZE, TYPE, FN) \ 2629 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 2630 uint32_t rm) \ 2631 { \ 2632 TYPE *n = vn; \ 2633 uint16_t mask = mve_element_mask(env); \ 2634 uint16_t eci_mask = mve_eci_mask(env); \ 2635 uint16_t beatpred = 0; \ 2636 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 2637 unsigned e; \ 2638 for (e = 0; e < 16 / ESIZE; e++) { \ 2639 bool r = FN(n[H##ESIZE(e)], (TYPE)rm); \ 2640 /* Comparison sets 0/1 bits for each byte in the element */ \ 2641 beatpred |= r * emask; \ 2642 emask <<= ESIZE; \ 2643 } \ 2644 beatpred &= mask; \ 2645 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 2646 (beatpred & eci_mask); \ 2647 mve_advance_vpt(env); \ 2648 } 2649 2650 #define DO_VCMP_S(OP, FN) \ 2651 DO_VCMP(OP##b, 1, int8_t, FN) \ 2652 DO_VCMP(OP##h, 2, int16_t, FN) \ 2653 DO_VCMP(OP##w, 4, int32_t, FN) \ 2654 DO_VCMP_SCALAR(OP##_scalarb, 1, int8_t, FN) \ 2655 DO_VCMP_SCALAR(OP##_scalarh, 2, int16_t, FN) \ 2656 DO_VCMP_SCALAR(OP##_scalarw, 4, int32_t, FN) 2657 2658 #define DO_VCMP_U(OP, FN) \ 2659 DO_VCMP(OP##b, 1, uint8_t, FN) \ 2660 DO_VCMP(OP##h, 2, uint16_t, FN) \ 2661 DO_VCMP(OP##w, 4, uint32_t, FN) \ 2662 DO_VCMP_SCALAR(OP##_scalarb, 1, uint8_t, FN) \ 2663 DO_VCMP_SCALAR(OP##_scalarh, 2, uint16_t, FN) \ 2664 DO_VCMP_SCALAR(OP##_scalarw, 4, uint32_t, FN) 2665 2666 #define DO_EQ(N, M) ((N) == (M)) 2667 #define DO_NE(N, M) ((N) != (M)) 2668 #define DO_EQ(N, M) ((N) == (M)) 2669 #define DO_EQ(N, M) ((N) == (M)) 2670 #define DO_GE(N, M) ((N) >= (M)) 2671 #define DO_LT(N, M) ((N) < (M)) 2672 #define DO_GT(N, M) ((N) > (M)) 2673 #define DO_LE(N, M) ((N) <= (M)) 2674 2675 DO_VCMP_U(vcmpeq, DO_EQ) 2676 DO_VCMP_U(vcmpne, DO_NE) 2677 DO_VCMP_U(vcmpcs, DO_GE) 2678 DO_VCMP_U(vcmphi, DO_GT) 2679 DO_VCMP_S(vcmpge, DO_GE) 2680 DO_VCMP_S(vcmplt, DO_LT) 2681 DO_VCMP_S(vcmpgt, DO_GT) 2682 DO_VCMP_S(vcmple, DO_LE) 2683 2684 void HELPER(mve_vpsel)(CPUARMState *env, void *vd, void *vn, void *vm) 2685 { 2686 /* 2687 * Qd[n] = VPR.P0[n] ? Qn[n] : Qm[n] 2688 * but note that whether bytes are written to Qd is still subject 2689 * to (all forms of) predication in the usual way. 
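 * (The helper below does this with two mergemask() calls per 64-bit
 * chunk: the first selects between Qn and Qm bytes according to P0,
 * and the second applies the usual predication when writing Qd.)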
2690 */ 2691 uint64_t *d = vd, *n = vn, *m = vm; 2692 uint16_t mask = mve_element_mask(env); 2693 uint16_t p0 = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0); 2694 unsigned e; 2695 for (e = 0; e < 16 / 8; e++, mask >>= 8, p0 >>= 8) { 2696 uint64_t r = m[H8(e)]; 2697 mergemask(&r, n[H8(e)], p0); 2698 mergemask(&d[H8(e)], r, mask); 2699 } 2700 mve_advance_vpt(env); 2701 } 2702 2703 void HELPER(mve_vpnot)(CPUARMState *env) 2704 { 2705 /* 2706 * P0 bits for unexecuted beats (where eci_mask is 0) are unchanged. 2707 * P0 bits for predicated lanes in executed bits (where mask is 0) are 0. 2708 * P0 bits otherwise are inverted. 2709 * (This is the same logic as VCMP.) 2710 * This insn is itself subject to predication and to beat-wise execution, 2711 * and after it executes VPT state advances in the usual way. 2712 */ 2713 uint16_t mask = mve_element_mask(env); 2714 uint16_t eci_mask = mve_eci_mask(env); 2715 uint16_t beatpred = ~env->v7m.vpr & mask; 2716 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (beatpred & eci_mask); 2717 mve_advance_vpt(env); 2718 } 2719 2720 /* 2721 * VCTP: P0 unexecuted bits unchanged, predicated bits zeroed, 2722 * otherwise set according to value of Rn. The calculation of 2723 * newmask here works in the same way as the calculation of the 2724 * ltpmask in mve_element_mask(), but we have pre-calculated 2725 * the masklen in the generated code. 2726 */ 2727 void HELPER(mve_vctp)(CPUARMState *env, uint32_t masklen) 2728 { 2729 uint16_t mask = mve_element_mask(env); 2730 uint16_t eci_mask = mve_eci_mask(env); 2731 uint16_t newmask; 2732 2733 assert(masklen <= 16); 2734 newmask = masklen ? MAKE_64BIT_MASK(0, masklen) : 0; 2735 newmask &= mask; 2736 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | (newmask & eci_mask); 2737 mve_advance_vpt(env); 2738 } 2739 2740 #define DO_1OP_SAT(OP, ESIZE, TYPE, FN) \ 2741 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2742 { \ 2743 TYPE *d = vd, *m = vm; \ 2744 uint16_t mask = mve_element_mask(env); \ 2745 unsigned e; \ 2746 bool qc = false; \ 2747 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2748 bool sat = false; \ 2749 mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)], &sat), mask); \ 2750 qc |= sat & mask & 1; \ 2751 } \ 2752 if (qc) { \ 2753 env->vfp.qc[0] = qc; \ 2754 } \ 2755 mve_advance_vpt(env); \ 2756 } 2757 2758 #define DO_VQABS_B(N, SATP) \ 2759 do_sat_bhs(DO_ABS((int64_t)N), INT8_MIN, INT8_MAX, SATP) 2760 #define DO_VQABS_H(N, SATP) \ 2761 do_sat_bhs(DO_ABS((int64_t)N), INT16_MIN, INT16_MAX, SATP) 2762 #define DO_VQABS_W(N, SATP) \ 2763 do_sat_bhs(DO_ABS((int64_t)N), INT32_MIN, INT32_MAX, SATP) 2764 2765 #define DO_VQNEG_B(N, SATP) do_sat_bhs(-(int64_t)N, INT8_MIN, INT8_MAX, SATP) 2766 #define DO_VQNEG_H(N, SATP) do_sat_bhs(-(int64_t)N, INT16_MIN, INT16_MAX, SATP) 2767 #define DO_VQNEG_W(N, SATP) do_sat_bhs(-(int64_t)N, INT32_MIN, INT32_MAX, SATP) 2768 2769 DO_1OP_SAT(vqabsb, 1, int8_t, DO_VQABS_B) 2770 DO_1OP_SAT(vqabsh, 2, int16_t, DO_VQABS_H) 2771 DO_1OP_SAT(vqabsw, 4, int32_t, DO_VQABS_W) 2772 2773 DO_1OP_SAT(vqnegb, 1, int8_t, DO_VQNEG_B) 2774 DO_1OP_SAT(vqnegh, 2, int16_t, DO_VQNEG_H) 2775 DO_1OP_SAT(vqnegw, 4, int32_t, DO_VQNEG_W) 2776 2777 /* 2778 * VMAXA, VMINA: vd is unsigned; vm is signed, and we take its 2779 * absolute value; we then do an unsigned comparison. 
2780 */ 2781 #define DO_VMAXMINA(OP, ESIZE, STYPE, UTYPE, FN) \ 2782 void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \ 2783 { \ 2784 UTYPE *d = vd; \ 2785 STYPE *m = vm; \ 2786 uint16_t mask = mve_element_mask(env); \ 2787 unsigned e; \ 2788 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2789 UTYPE r = DO_ABS(m[H##ESIZE(e)]); \ 2790 r = FN(d[H##ESIZE(e)], r); \ 2791 mergemask(&d[H##ESIZE(e)], r, mask); \ 2792 } \ 2793 mve_advance_vpt(env); \ 2794 } 2795 2796 DO_VMAXMINA(vmaxab, 1, int8_t, uint8_t, DO_MAX) 2797 DO_VMAXMINA(vmaxah, 2, int16_t, uint16_t, DO_MAX) 2798 DO_VMAXMINA(vmaxaw, 4, int32_t, uint32_t, DO_MAX) 2799 DO_VMAXMINA(vminab, 1, int8_t, uint8_t, DO_MIN) 2800 DO_VMAXMINA(vminah, 2, int16_t, uint16_t, DO_MIN) 2801 DO_VMAXMINA(vminaw, 4, int32_t, uint32_t, DO_MIN) 2802 2803 /* 2804 * 2-operand floating point. Note that if an element is partially 2805 * predicated we must do the FP operation to update the non-predicated 2806 * bytes, but we must be careful to avoid updating the FP exception 2807 * state unless byte 0 of the element was unpredicated. 2808 */ 2809 #define DO_2OP_FP(OP, ESIZE, TYPE, FN) \ 2810 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2811 void *vd, void *vn, void *vm) \ 2812 { \ 2813 TYPE *d = vd, *n = vn, *m = vm; \ 2814 TYPE r; \ 2815 uint16_t mask = mve_element_mask(env); \ 2816 unsigned e; \ 2817 float_status *fpst; \ 2818 float_status scratch_fpst; \ 2819 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2820 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 2821 continue; \ 2822 } \ 2823 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 2824 &env->vfp.standard_fp_status; \ 2825 if (!(mask & 1)) { \ 2826 /* We need the result but without updating flags */ \ 2827 scratch_fpst = *fpst; \ 2828 fpst = &scratch_fpst; \ 2829 } \ 2830 r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \ 2831 mergemask(&d[H##ESIZE(e)], r, mask); \ 2832 } \ 2833 mve_advance_vpt(env); \ 2834 } 2835 2836 #define DO_2OP_FP_ALL(OP, FN) \ 2837 DO_2OP_FP(OP##h, 2, float16, float16_##FN) \ 2838 DO_2OP_FP(OP##s, 4, float32, float32_##FN) 2839 2840 DO_2OP_FP_ALL(vfadd, add) 2841 DO_2OP_FP_ALL(vfsub, sub) 2842 DO_2OP_FP_ALL(vfmul, mul) 2843 2844 static inline float16 float16_abd(float16 a, float16 b, float_status *s) 2845 { 2846 return float16_abs(float16_sub(a, b, s)); 2847 } 2848 2849 static inline float32 float32_abd(float32 a, float32 b, float_status *s) 2850 { 2851 return float32_abs(float32_sub(a, b, s)); 2852 } 2853 2854 DO_2OP_FP_ALL(vfabd, abd) 2855 DO_2OP_FP_ALL(vmaxnm, maxnum) 2856 DO_2OP_FP_ALL(vminnm, minnum) 2857 2858 static inline float16 float16_maxnuma(float16 a, float16 b, float_status *s) 2859 { 2860 return float16_maxnum(float16_abs(a), float16_abs(b), s); 2861 } 2862 2863 static inline float32 float32_maxnuma(float32 a, float32 b, float_status *s) 2864 { 2865 return float32_maxnum(float32_abs(a), float32_abs(b), s); 2866 } 2867 2868 static inline float16 float16_minnuma(float16 a, float16 b, float_status *s) 2869 { 2870 return float16_minnum(float16_abs(a), float16_abs(b), s); 2871 } 2872 2873 static inline float32 float32_minnuma(float32 a, float32 b, float_status *s) 2874 { 2875 return float32_minnum(float32_abs(a), float32_abs(b), s); 2876 } 2877 2878 DO_2OP_FP_ALL(vmaxnma, maxnuma) 2879 DO_2OP_FP_ALL(vminnma, minnuma) 2880 2881 #define DO_VCADD_FP(OP, ESIZE, TYPE, FN0, FN1) \ 2882 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2883 void *vd, void *vn, void *vm) \ 2884 { \ 2885 TYPE *d = vd, *n = vn, *m = vm; \ 2886 TYPE r[16 / ESIZE]; \ 2887 
uint16_t tm, mask = mve_element_mask(env); \ 2888 unsigned e; \ 2889 float_status *fpst; \ 2890 float_status scratch_fpst; \ 2891 /* Calculate all results first to avoid overwriting inputs */ \ 2892 for (e = 0, tm = mask; e < 16 / ESIZE; e++, tm >>= ESIZE) { \ 2893 if ((tm & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 2894 r[e] = 0; \ 2895 continue; \ 2896 } \ 2897 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 2898 &env->vfp.standard_fp_status; \ 2899 if (!(tm & 1)) { \ 2900 /* We need the result but without updating flags */ \ 2901 scratch_fpst = *fpst; \ 2902 fpst = &scratch_fpst; \ 2903 } \ 2904 if (!(e & 1)) { \ 2905 r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)], fpst); \ 2906 } else { \ 2907 r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)], fpst); \ 2908 } \ 2909 } \ 2910 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2911 mergemask(&d[H##ESIZE(e)], r[e], mask); \ 2912 } \ 2913 mve_advance_vpt(env); \ 2914 } 2915 2916 DO_VCADD_FP(vfcadd90h, 2, float16, float16_sub, float16_add) 2917 DO_VCADD_FP(vfcadd90s, 4, float32, float32_sub, float32_add) 2918 DO_VCADD_FP(vfcadd270h, 2, float16, float16_add, float16_sub) 2919 DO_VCADD_FP(vfcadd270s, 4, float32, float32_add, float32_sub) 2920 2921 #define DO_VFMA(OP, ESIZE, TYPE, CHS) \ 2922 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2923 void *vd, void *vn, void *vm) \ 2924 { \ 2925 TYPE *d = vd, *n = vn, *m = vm; \ 2926 TYPE r; \ 2927 uint16_t mask = mve_element_mask(env); \ 2928 unsigned e; \ 2929 float_status *fpst; \ 2930 float_status scratch_fpst; \ 2931 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 2932 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 2933 continue; \ 2934 } \ 2935 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 2936 &env->vfp.standard_fp_status; \ 2937 if (!(mask & 1)) { \ 2938 /* We need the result but without updating flags */ \ 2939 scratch_fpst = *fpst; \ 2940 fpst = &scratch_fpst; \ 2941 } \ 2942 r = n[H##ESIZE(e)]; \ 2943 if (CHS) { \ 2944 r = TYPE##_chs(r); \ 2945 } \ 2946 r = TYPE##_muladd(r, m[H##ESIZE(e)], d[H##ESIZE(e)], \ 2947 0, fpst); \ 2948 mergemask(&d[H##ESIZE(e)], r, mask); \ 2949 } \ 2950 mve_advance_vpt(env); \ 2951 } 2952 2953 DO_VFMA(vfmah, 2, float16, false) 2954 DO_VFMA(vfmas, 4, float32, false) 2955 DO_VFMA(vfmsh, 2, float16, true) 2956 DO_VFMA(vfmss, 4, float32, true) 2957 2958 #define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN) \ 2959 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 2960 void *vd, void *vn, void *vm) \ 2961 { \ 2962 TYPE *d = vd, *n = vn, *m = vm; \ 2963 TYPE r0, r1, e1, e2, e3, e4; \ 2964 uint16_t mask = mve_element_mask(env); \ 2965 unsigned e; \ 2966 float_status *fpst0, *fpst1; \ 2967 float_status scratch_fpst; \ 2968 /* We loop through pairs of elements at a time */ \ 2969 for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) { \ 2970 if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) { \ 2971 continue; \ 2972 } \ 2973 fpst0 = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 2974 &env->vfp.standard_fp_status; \ 2975 fpst1 = fpst0; \ 2976 if (!(mask & 1)) { \ 2977 scratch_fpst = *fpst0; \ 2978 fpst0 = &scratch_fpst; \ 2979 } \ 2980 if (!(mask & (1 << ESIZE))) { \ 2981 scratch_fpst = *fpst1; \ 2982 fpst1 = &scratch_fpst; \ 2983 } \ 2984 switch (ROT) { \ 2985 case 0: \ 2986 e1 = m[H##ESIZE(e)]; \ 2987 e2 = n[H##ESIZE(e)]; \ 2988 e3 = m[H##ESIZE(e + 1)]; \ 2989 e4 = n[H##ESIZE(e)]; \ 2990 break; \ 2991 case 1: \ 2992 e1 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ 2993 e2 = n[H##ESIZE(e + 1)]; \ 2994 e3 = m[H##ESIZE(e)]; \ 2995 e4 = n[H##ESIZE(e + 1)]; \ 2996 break; \ 2997 case 2: \ 2998 e1 = TYPE##_chs(m[H##ESIZE(e)]); \ 2999 e2 = n[H##ESIZE(e)]; \ 3000 e3 = TYPE##_chs(m[H##ESIZE(e + 1)]); \ 3001 e4 = n[H##ESIZE(e)]; \ 3002 break; \ 3003 case 3: \ 3004 e1 = m[H##ESIZE(e + 1)]; \ 3005 e2 = n[H##ESIZE(e + 1)]; \ 3006 e3 = TYPE##_chs(m[H##ESIZE(e)]); \ 3007 e4 = n[H##ESIZE(e + 1)]; \ 3008 break; \ 3009 default: \ 3010 g_assert_not_reached(); \ 3011 } \ 3012 r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0); \ 3013 r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1); \ 3014 mergemask(&d[H##ESIZE(e)], r0, mask); \ 3015 mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE); \ 3016 } \ 3017 mve_advance_vpt(env); \ 3018 } 3019 3020 #define DO_VCMULH(N, M, D, S) float16_mul(N, M, S) 3021 #define DO_VCMULS(N, M, D, S) float32_mul(N, M, S) 3022 3023 #define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S) 3024 #define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S) 3025 3026 DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH) 3027 DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS) 3028 DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH) 3029 DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS) 3030 DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH) 3031 DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS) 3032 DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH) 3033 DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS) 3034 3035 DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH) 3036 DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS) 3037 DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH) 3038 DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS) 3039 DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH) 3040 DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS) 3041 DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH) 3042 DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS) 3043 3044 #define DO_2OP_FP_SCALAR(OP, ESIZE, TYPE, FN) \ 3045 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 3046 void *vd, void *vn, uint32_t rm) \ 3047 { \ 3048 TYPE *d = vd, *n = vn; \ 3049 TYPE r, m = rm; \ 3050 uint16_t mask = mve_element_mask(env); \ 3051 unsigned e; \ 3052 float_status *fpst; \ 3053 float_status scratch_fpst; \ 3054 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3055 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3056 continue; \ 3057 } \ 3058 fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 3059 &env->vfp.standard_fp_status; \ 3060 if (!(mask & 1)) { \ 3061 /* We need the result but without updating flags */ \ 3062 scratch_fpst = *fpst; \ 3063 fpst = &scratch_fpst; \ 3064 } \ 3065 r = FN(n[H##ESIZE(e)], m, fpst); \ 3066 mergemask(&d[H##ESIZE(e)], r, mask); \ 3067 } \ 3068 mve_advance_vpt(env); \ 3069 } 3070 3071 #define DO_2OP_FP_SCALAR_ALL(OP, FN) \ 3072 DO_2OP_FP_SCALAR(OP##h, 2, float16, float16_##FN) \ 3073 DO_2OP_FP_SCALAR(OP##s, 4, float32, float32_##FN) 3074 3075 DO_2OP_FP_SCALAR_ALL(vfadd_scalar, add) 3076 DO_2OP_FP_SCALAR_ALL(vfsub_scalar, sub) 3077 DO_2OP_FP_SCALAR_ALL(vfmul_scalar, mul) 3078 3079 #define DO_2OP_FP_ACC_SCALAR(OP, ESIZE, TYPE, FN) \ 3080 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 3081 void *vd, void *vn, uint32_t rm) \ 3082 { \ 3083 TYPE *d = vd, *n = vn; \ 3084 TYPE r, m = rm; \ 3085 uint16_t mask = mve_element_mask(env); \ 3086 unsigned e; \ 3087 float_status *fpst; \ 3088 float_status scratch_fpst; \ 3089 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3090 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3091 continue; \ 3092 } \ 3093 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 3094 &env->vfp.standard_fp_status; \ 3095 if (!(mask & 1)) { \ 3096 /* We need the result but without updating flags */ \ 3097 scratch_fpst = *fpst; \ 3098 fpst = &scratch_fpst; \ 3099 } \ 3100 r = FN(n[H##ESIZE(e)], m, d[H##ESIZE(e)], 0, fpst); \ 3101 mergemask(&d[H##ESIZE(e)], r, mask); \ 3102 } \ 3103 mve_advance_vpt(env); \ 3104 } 3105 3106 /* VFMAS is vector * vector + scalar, so swap op2 and op3 */ 3107 #define DO_VFMAS_SCALARH(N, M, D, F, S) float16_muladd(N, D, M, F, S) 3108 #define DO_VFMAS_SCALARS(N, M, D, F, S) float32_muladd(N, D, M, F, S) 3109 3110 /* VFMA is vector * scalar + vector */ 3111 DO_2OP_FP_ACC_SCALAR(vfma_scalarh, 2, float16, float16_muladd) 3112 DO_2OP_FP_ACC_SCALAR(vfma_scalars, 4, float32, float32_muladd) 3113 DO_2OP_FP_ACC_SCALAR(vfmas_scalarh, 2, float16, DO_VFMAS_SCALARH) 3114 DO_2OP_FP_ACC_SCALAR(vfmas_scalars, 4, float32, DO_VFMAS_SCALARS) 3115 3116 /* Floating point max/min across vector. */ 3117 #define DO_FP_VMAXMINV(OP, ESIZE, TYPE, ABS, FN) \ 3118 uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \ 3119 uint32_t ra_in) \ 3120 { \ 3121 uint16_t mask = mve_element_mask(env); \ 3122 unsigned e; \ 3123 TYPE *m = vm; \ 3124 TYPE ra = (TYPE)ra_in; \ 3125 float_status *fpst = (ESIZE == 2) ? 
\ 3126 &env->vfp.standard_fp_status_f16 : \ 3127 &env->vfp.standard_fp_status; \ 3128 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3129 if (mask & 1) { \ 3130 TYPE v = m[H##ESIZE(e)]; \ 3131 if (TYPE##_is_signaling_nan(ra, fpst)) { \ 3132 ra = TYPE##_silence_nan(ra, fpst); \ 3133 float_raise(float_flag_invalid, fpst); \ 3134 } \ 3135 if (TYPE##_is_signaling_nan(v, fpst)) { \ 3136 v = TYPE##_silence_nan(v, fpst); \ 3137 float_raise(float_flag_invalid, fpst); \ 3138 } \ 3139 if (ABS) { \ 3140 v = TYPE##_abs(v); \ 3141 } \ 3142 ra = FN(ra, v, fpst); \ 3143 } \ 3144 } \ 3145 mve_advance_vpt(env); \ 3146 return ra; \ 3147 } \ 3148 3149 #define NOP(X) (X) 3150 3151 DO_FP_VMAXMINV(vmaxnmvh, 2, float16, false, float16_maxnum) 3152 DO_FP_VMAXMINV(vmaxnmvs, 4, float32, false, float32_maxnum) 3153 DO_FP_VMAXMINV(vminnmvh, 2, float16, false, float16_minnum) 3154 DO_FP_VMAXMINV(vminnmvs, 4, float32, false, float32_minnum) 3155 DO_FP_VMAXMINV(vmaxnmavh, 2, float16, true, float16_maxnum) 3156 DO_FP_VMAXMINV(vmaxnmavs, 4, float32, true, float32_maxnum) 3157 DO_FP_VMAXMINV(vminnmavh, 2, float16, true, float16_minnum) 3158 DO_FP_VMAXMINV(vminnmavs, 4, float32, true, float32_minnum) 3159 3160 /* FP compares; note that all comparisons signal InvalidOp for QNaNs */ 3161 #define DO_VCMP_FP(OP, ESIZE, TYPE, FN) \ 3162 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, void *vm) \ 3163 { \ 3164 TYPE *n = vn, *m = vm; \ 3165 uint16_t mask = mve_element_mask(env); \ 3166 uint16_t eci_mask = mve_eci_mask(env); \ 3167 uint16_t beatpred = 0; \ 3168 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 3169 unsigned e; \ 3170 float_status *fpst; \ 3171 float_status scratch_fpst; \ 3172 bool r; \ 3173 for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \ 3174 if ((mask & emask) == 0) { \ 3175 continue; \ 3176 } \ 3177 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 3178 &env->vfp.standard_fp_status; \ 3179 if (!(mask & (1 << (e * ESIZE)))) { \ 3180 /* We need the result but without updating flags */ \ 3181 scratch_fpst = *fpst; \ 3182 fpst = &scratch_fpst; \ 3183 } \ 3184 r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], fpst); \ 3185 /* Comparison sets 0/1 bits for each byte in the element */ \ 3186 beatpred |= r * emask; \ 3187 } \ 3188 beatpred &= mask; \ 3189 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 3190 (beatpred & eci_mask); \ 3191 mve_advance_vpt(env); \ 3192 } 3193 3194 #define DO_VCMP_FP_SCALAR(OP, ESIZE, TYPE, FN) \ 3195 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ 3196 uint32_t rm) \ 3197 { \ 3198 TYPE *n = vn; \ 3199 uint16_t mask = mve_element_mask(env); \ 3200 uint16_t eci_mask = mve_eci_mask(env); \ 3201 uint16_t beatpred = 0; \ 3202 uint16_t emask = MAKE_64BIT_MASK(0, ESIZE); \ 3203 unsigned e; \ 3204 float_status *fpst; \ 3205 float_status scratch_fpst; \ 3206 bool r; \ 3207 for (e = 0; e < 16 / ESIZE; e++, emask <<= ESIZE) { \ 3208 if ((mask & emask) == 0) { \ 3209 continue; \ 3210 } \ 3211 fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 3212 &env->vfp.standard_fp_status; \ 3213 if (!(mask & (1 << (e * ESIZE)))) { \ 3214 /* We need the result but without updating flags */ \ 3215 scratch_fpst = *fpst; \ 3216 fpst = &scratch_fpst; \ 3217 } \ 3218 r = FN(n[H##ESIZE(e)], (TYPE)rm, fpst); \ 3219 /* Comparison sets 0/1 bits for each byte in the element */ \ 3220 beatpred |= r * emask; \ 3221 } \ 3222 beatpred &= mask; \ 3223 env->v7m.vpr = (env->v7m.vpr & ~(uint32_t)eci_mask) | \ 3224 (beatpred & eci_mask); \ 3225 mve_advance_vpt(env); \ 3226 } 3227 3228 #define DO_VCMP_FP_BOTH(VOP, SOP, ESIZE, TYPE, FN) \ 3229 DO_VCMP_FP(VOP, ESIZE, TYPE, FN) \ 3230 DO_VCMP_FP_SCALAR(SOP, ESIZE, TYPE, FN) 3231 3232 /* 3233 * Some care is needed here to get the correct result for the unordered case. 3234 * Architecturally EQ, GE and GT are defined to be false for unordered, but 3235 * the NE, LT and LE comparisons are defined as simple logical inverses of 3236 * EQ, GE and GT and so they must return true for unordered. The softfloat 3237 * comparison functions float*_{eq,le,lt} all return false for unordered. 3238 */ 3239 #define DO_GE16(X, Y, S) float16_le(Y, X, S) 3240 #define DO_GE32(X, Y, S) float32_le(Y, X, S) 3241 #define DO_GT16(X, Y, S) float16_lt(Y, X, S) 3242 #define DO_GT32(X, Y, S) float32_lt(Y, X, S) 3243 3244 DO_VCMP_FP_BOTH(vfcmpeqh, vfcmpeq_scalarh, 2, float16, float16_eq) 3245 DO_VCMP_FP_BOTH(vfcmpeqs, vfcmpeq_scalars, 4, float32, float32_eq) 3246 3247 DO_VCMP_FP_BOTH(vfcmpneh, vfcmpne_scalarh, 2, float16, !float16_eq) 3248 DO_VCMP_FP_BOTH(vfcmpnes, vfcmpne_scalars, 4, float32, !float32_eq) 3249 3250 DO_VCMP_FP_BOTH(vfcmpgeh, vfcmpge_scalarh, 2, float16, DO_GE16) 3251 DO_VCMP_FP_BOTH(vfcmpges, vfcmpge_scalars, 4, float32, DO_GE32) 3252 3253 DO_VCMP_FP_BOTH(vfcmplth, vfcmplt_scalarh, 2, float16, !DO_GE16) 3254 DO_VCMP_FP_BOTH(vfcmplts, vfcmplt_scalars, 4, float32, !DO_GE32) 3255 3256 DO_VCMP_FP_BOTH(vfcmpgth, vfcmpgt_scalarh, 2, float16, DO_GT16) 3257 DO_VCMP_FP_BOTH(vfcmpgts, vfcmpgt_scalars, 4, float32, DO_GT32) 3258 3259 DO_VCMP_FP_BOTH(vfcmpleh, vfcmple_scalarh, 2, float16, !DO_GT16) 3260 DO_VCMP_FP_BOTH(vfcmples, vfcmple_scalars, 4, float32, !DO_GT32) 3261 3262 #define DO_VCVT_FIXED(OP, ESIZE, TYPE, FN) \ 3263 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm, \ 3264 uint32_t shift) \ 3265 { \ 3266 TYPE *d = vd, *m = vm; \ 3267 TYPE r; \ 3268 uint16_t mask = mve_element_mask(env); \ 3269 unsigned e; \ 3270 float_status *fpst; \ 3271 float_status scratch_fpst; \ 3272 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3273 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3274 continue; \ 3275 } \ 3276 fpst = (ESIZE == 2) ? 
&env->vfp.standard_fp_status_f16 : \ 3277 &env->vfp.standard_fp_status; \ 3278 if (!(mask & 1)) { \ 3279 /* We need the result but without updating flags */ \ 3280 scratch_fpst = *fpst; \ 3281 fpst = &scratch_fpst; \ 3282 } \ 3283 r = FN(m[H##ESIZE(e)], shift, fpst); \ 3284 mergemask(&d[H##ESIZE(e)], r, mask); \ 3285 } \ 3286 mve_advance_vpt(env); \ 3287 } 3288 3289 DO_VCVT_FIXED(vcvt_sh, 2, int16_t, helper_vfp_shtoh) 3290 DO_VCVT_FIXED(vcvt_uh, 2, uint16_t, helper_vfp_uhtoh) 3291 DO_VCVT_FIXED(vcvt_hs, 2, int16_t, helper_vfp_toshh_round_to_zero) 3292 DO_VCVT_FIXED(vcvt_hu, 2, uint16_t, helper_vfp_touhh_round_to_zero) 3293 DO_VCVT_FIXED(vcvt_sf, 4, int32_t, helper_vfp_sltos) 3294 DO_VCVT_FIXED(vcvt_uf, 4, uint32_t, helper_vfp_ultos) 3295 DO_VCVT_FIXED(vcvt_fs, 4, int32_t, helper_vfp_tosls_round_to_zero) 3296 DO_VCVT_FIXED(vcvt_fu, 4, uint32_t, helper_vfp_touls_round_to_zero) 3297 3298 /* VCVT with specified rmode */ 3299 #define DO_VCVT_RMODE(OP, ESIZE, TYPE, FN) \ 3300 void HELPER(glue(mve_, OP))(CPUARMState *env, \ 3301 void *vd, void *vm, uint32_t rmode) \ 3302 { \ 3303 TYPE *d = vd, *m = vm; \ 3304 TYPE r; \ 3305 uint16_t mask = mve_element_mask(env); \ 3306 unsigned e; \ 3307 float_status *fpst; \ 3308 float_status scratch_fpst; \ 3309 float_status *base_fpst = (ESIZE == 2) ? \ 3310 &env->vfp.standard_fp_status_f16 : \ 3311 &env->vfp.standard_fp_status; \ 3312 uint32_t prev_rmode = get_float_rounding_mode(base_fpst); \ 3313 set_float_rounding_mode(rmode, base_fpst); \ 3314 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3315 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3316 continue; \ 3317 } \ 3318 fpst = base_fpst; \ 3319 if (!(mask & 1)) { \ 3320 /* We need the result but without updating flags */ \ 3321 scratch_fpst = *fpst; \ 3322 fpst = &scratch_fpst; \ 3323 } \ 3324 r = FN(m[H##ESIZE(e)], 0, fpst); \ 3325 mergemask(&d[H##ESIZE(e)], r, mask); \ 3326 } \ 3327 set_float_rounding_mode(prev_rmode, base_fpst); \ 3328 mve_advance_vpt(env); \ 3329 } 3330 3331 DO_VCVT_RMODE(vcvt_rm_sh, 2, uint16_t, helper_vfp_toshh) 3332 DO_VCVT_RMODE(vcvt_rm_uh, 2, uint16_t, helper_vfp_touhh) 3333 DO_VCVT_RMODE(vcvt_rm_ss, 4, uint32_t, helper_vfp_tosls) 3334 DO_VCVT_RMODE(vcvt_rm_us, 4, uint32_t, helper_vfp_touls) 3335 3336 #define DO_VRINT_RM_H(M, F, S) helper_rinth(M, S) 3337 #define DO_VRINT_RM_S(M, F, S) helper_rints(M, S) 3338 3339 DO_VCVT_RMODE(vrint_rm_h, 2, uint16_t, DO_VRINT_RM_H) 3340 DO_VCVT_RMODE(vrint_rm_s, 4, uint32_t, DO_VRINT_RM_S) 3341 3342 /* 3343 * VCVT between halfprec and singleprec. As usual for halfprec 3344 * conversions, FZ16 is ignored and AHP is observed. 
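 * (Hence do_vcvt_sh below temporarily clears flush-to-zero, and
 * do_vcvt_hs clears flush-inputs-to-zero, on the float_status they use,
 * so the half-precision side of the conversion is not flushed.)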
3345 */ 3346 static void do_vcvt_sh(CPUARMState *env, void *vd, void *vm, int top) 3347 { 3348 uint16_t *d = vd; 3349 uint32_t *m = vm; 3350 uint16_t r; 3351 uint16_t mask = mve_element_mask(env); 3352 bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP); 3353 unsigned e; 3354 float_status *fpst; 3355 float_status scratch_fpst; 3356 float_status *base_fpst = &env->vfp.standard_fp_status; 3357 bool old_fz = get_flush_to_zero(base_fpst); 3358 set_flush_to_zero(false, base_fpst); 3359 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 3360 if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) { 3361 continue; 3362 } 3363 fpst = base_fpst; 3364 if (!(mask & 1)) { 3365 /* We need the result but without updating flags */ 3366 scratch_fpst = *fpst; 3367 fpst = &scratch_fpst; 3368 } 3369 r = float32_to_float16(m[H4(e)], ieee, fpst); 3370 mergemask(&d[H2(e * 2 + top)], r, mask >> (top * 2)); 3371 } 3372 set_flush_to_zero(old_fz, base_fpst); 3373 mve_advance_vpt(env); 3374 } 3375 3376 static void do_vcvt_hs(CPUARMState *env, void *vd, void *vm, int top) 3377 { 3378 uint32_t *d = vd; 3379 uint16_t *m = vm; 3380 uint32_t r; 3381 uint16_t mask = mve_element_mask(env); 3382 bool ieee = !(env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_AHP); 3383 unsigned e; 3384 float_status *fpst; 3385 float_status scratch_fpst; 3386 float_status *base_fpst = &env->vfp.standard_fp_status; 3387 bool old_fiz = get_flush_inputs_to_zero(base_fpst); 3388 set_flush_inputs_to_zero(false, base_fpst); 3389 for (e = 0; e < 16 / 4; e++, mask >>= 4) { 3390 if ((mask & MAKE_64BIT_MASK(0, 4)) == 0) { 3391 continue; 3392 } 3393 fpst = base_fpst; 3394 if (!(mask & (1 << (top * 2)))) { 3395 /* We need the result but without updating flags */ 3396 scratch_fpst = *fpst; 3397 fpst = &scratch_fpst; 3398 } 3399 r = float16_to_float32(m[H2(e * 2 + top)], ieee, fpst); 3400 mergemask(&d[H4(e)], r, mask); 3401 } 3402 set_flush_inputs_to_zero(old_fiz, base_fpst); 3403 mve_advance_vpt(env); 3404 } 3405 3406 void HELPER(mve_vcvtb_sh)(CPUARMState *env, void *vd, void *vm) 3407 { 3408 do_vcvt_sh(env, vd, vm, 0); 3409 } 3410 void HELPER(mve_vcvtt_sh)(CPUARMState *env, void *vd, void *vm) 3411 { 3412 do_vcvt_sh(env, vd, vm, 1); 3413 } 3414 void HELPER(mve_vcvtb_hs)(CPUARMState *env, void *vd, void *vm) 3415 { 3416 do_vcvt_hs(env, vd, vm, 0); 3417 } 3418 void HELPER(mve_vcvtt_hs)(CPUARMState *env, void *vd, void *vm) 3419 { 3420 do_vcvt_hs(env, vd, vm, 1); 3421 } 3422 3423 #define DO_1OP_FP(OP, ESIZE, TYPE, FN) \ 3424 void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vm) \ 3425 { \ 3426 TYPE *d = vd, *m = vm; \ 3427 TYPE r; \ 3428 uint16_t mask = mve_element_mask(env); \ 3429 unsigned e; \ 3430 float_status *fpst; \ 3431 float_status scratch_fpst; \ 3432 for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ 3433 if ((mask & MAKE_64BIT_MASK(0, ESIZE)) == 0) { \ 3434 continue; \ 3435 } \ 3436 fpst = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 : \ 3437 &env->vfp.standard_fp_status; \ 3438 if (!(mask & 1)) { \ 3439 /* We need the result but without updating flags */ \ 3440 scratch_fpst = *fpst; \ 3441 fpst = &scratch_fpst; \ 3442 } \ 3443 r = FN(m[H##ESIZE(e)], fpst); \ 3444 mergemask(&d[H##ESIZE(e)], r, mask); \ 3445 } \ 3446 mve_advance_vpt(env); \ 3447 } 3448 3449 DO_1OP_FP(vrintx_h, 2, float16, float16_round_to_int) 3450 DO_1OP_FP(vrintx_s, 4, float32, float32_round_to_int) 3451