1 /* 2 * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support 3 * 4 * Copyright (c) 2005 Fabrice Bellard 5 * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com> 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2.1 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 19 */ 20 21 #include "crypto/aes.h" 22 #include "crypto/aes-round.h" 23 #include "crypto/clmul.h" 24 25 #if SHIFT == 0 26 #define Reg MMXReg 27 #define XMM_ONLY(...) 28 #define B(n) MMX_B(n) 29 #define W(n) MMX_W(n) 30 #define L(n) MMX_L(n) 31 #define Q(n) MMX_Q(n) 32 #define SUFFIX _mmx 33 #else 34 #define Reg ZMMReg 35 #define XMM_ONLY(...) __VA_ARGS__ 36 #define B(n) ZMM_B(n) 37 #define W(n) ZMM_W(n) 38 #define L(n) ZMM_L(n) 39 #define Q(n) ZMM_Q(n) 40 #if SHIFT == 1 41 #define SUFFIX _xmm 42 #else 43 #define SUFFIX _ymm 44 #endif 45 #endif 46 47 #define LANE_WIDTH (SHIFT ? 16 : 8) 48 #define PACK_WIDTH (LANE_WIDTH / 2) 49 50 #if SHIFT == 0 51 #define FPSRL(x, c) ((x) >> shift) 52 #define FPSRAW(x, c) ((int16_t)(x) >> shift) 53 #define FPSRAL(x, c) ((int32_t)(x) >> shift) 54 #define FPSLL(x, c) ((x) << shift) 55 #endif 56 57 void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 58 { 59 int shift; 60 if (c->Q(0) > 15) { 61 for (int i = 0; i < 1 << SHIFT; i++) { 62 d->Q(i) = 0; 63 } 64 } else { 65 shift = c->B(0); 66 for (int i = 0; i < 4 << SHIFT; i++) { 67 d->W(i) = FPSRL(s->W(i), shift); 68 } 69 } 70 } 71 72 void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 73 { 74 int shift; 75 if (c->Q(0) > 15) { 76 for (int i = 0; i < 1 << SHIFT; i++) { 77 d->Q(i) = 0; 78 } 79 } else { 80 shift = c->B(0); 81 for (int i = 0; i < 4 << SHIFT; i++) { 82 d->W(i) = FPSLL(s->W(i), shift); 83 } 84 } 85 } 86 87 void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 88 { 89 int shift; 90 if (c->Q(0) > 15) { 91 shift = 15; 92 } else { 93 shift = c->B(0); 94 } 95 for (int i = 0; i < 4 << SHIFT; i++) { 96 d->W(i) = FPSRAW(s->W(i), shift); 97 } 98 } 99 100 void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 101 { 102 int shift; 103 if (c->Q(0) > 31) { 104 for (int i = 0; i < 1 << SHIFT; i++) { 105 d->Q(i) = 0; 106 } 107 } else { 108 shift = c->B(0); 109 for (int i = 0; i < 2 << SHIFT; i++) { 110 d->L(i) = FPSRL(s->L(i), shift); 111 } 112 } 113 } 114 115 void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 116 { 117 int shift; 118 if (c->Q(0) > 31) { 119 for (int i = 0; i < 1 << SHIFT; i++) { 120 d->Q(i) = 0; 121 } 122 } else { 123 shift = c->B(0); 124 for (int i = 0; i < 2 << SHIFT; i++) { 125 d->L(i) = FPSLL(s->L(i), shift); 126 } 127 } 128 } 129 130 void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 131 { 132 int shift; 133 if (c->Q(0) > 31) { 134 shift = 31; 135 } else { 136 shift = c->B(0); 137 } 138 for (int i = 0; i < 2 << SHIFT; i++) { 139 d->L(i) = FPSRAL(s->L(i), shift); 140 } 141 } 142 143 void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 144 { 145 int shift; 146 if (c->Q(0) > 63) { 147 for (int i = 0; i < 1 << SHIFT; i++) { 148 d->Q(i) = 0; 149 } 150 } else { 151 shift = c->B(0); 152 for (int i = 0; i < 1 << SHIFT; i++) { 153 d->Q(i) = FPSRL(s->Q(i), shift); 154 } 155 } 156 } 157 158 void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 159 { 160 int shift; 161 if (c->Q(0) > 63) { 162 for (int i = 0; i < 1 << SHIFT; i++) { 163 d->Q(i) = 0; 164 } 165 } else { 166 shift = c->B(0); 167 for (int i = 0; i < 1 << SHIFT; i++) { 168 d->Q(i) = FPSLL(s->Q(i), shift); 169 } 170 } 171 } 172 173 #if SHIFT >= 1 174 void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 175 { 176 int shift, i, j; 177 178 shift = c->L(0); 179 if (shift > 16) { 180 shift = 16; 181 } 182 for (j = 0; j < 8 << SHIFT; j += LANE_WIDTH) { 183 for (i = 0; i < 16 - shift; i++) { 184 d->B(j + i) = s->B(j + i + shift); 185 } 186 for (i = 16 - shift; i < 16; i++) { 187 d->B(j + i) = 0; 188 } 189 } 190 } 191 192 void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c) 193 { 194 int shift, i, j; 195 196 shift = c->L(0); 197 if (shift > 16) { 198 shift = 16; 199 } 200 for (j = 0; j < 8 << SHIFT; j += LANE_WIDTH) { 201 for (i = 15; i >= shift; i--) { 202 d->B(j + i) = s->B(j + i - shift); 203 } 204 for (i = 0; i < shift; i++) { 205 d->B(j + i) = 0; 206 } 207 } 208 } 209 #endif 210 211 #define SSE_HELPER_1(name, elem, num, F) \ 212 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ 213 { \ 214 int n = num; \ 215 for (int i = 0; i < n; i++) { \ 216 d->elem(i) = F(s->elem(i)); \ 217 } \ 218 } 219 220 #define SSE_HELPER_2(name, elem, num, F) \ 221 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 222 { \ 223 int n = num; \ 224 for (int i = 0; i < n; i++) { \ 225 d->elem(i) = F(v->elem(i), s->elem(i)); \ 226 } \ 227 } 228 229 #define SSE_HELPER_B(name, F) \ 230 SSE_HELPER_2(name, B, 8 << SHIFT, F) 231 232 #define SSE_HELPER_W(name, F) \ 233 SSE_HELPER_2(name, W, 4 << SHIFT, F) 234 235 #define SSE_HELPER_L(name, F) \ 236 SSE_HELPER_2(name, L, 2 << SHIFT, F) 237 238 #define SSE_HELPER_Q(name, F) \ 239 SSE_HELPER_2(name, Q, 1 << SHIFT, F) 240 241 #if SHIFT == 0 242 static inline int satub(int x) 243 { 244 if (x < 0) { 245 return 0; 246 } else if (x > 255) { 247 return 255; 248 } else { 249 return x; 250 } 251 } 252 253 static inline int satuw(int x) 254 { 255 if (x < 0) { 256 return 0; 257 } else if (x > 65535) { 258 return 65535; 259 } else { 260 return x; 261 } 262 } 263 264 static inline int satsb(int x) 265 { 266 if (x < -128) { 267 return -128; 268 } else if (x > 127) { 269 return 127; 270 } else { 271 return x; 272 } 273 } 274 275 static inline int satsw(int x) 276 { 277 if (x < -32768) { 278 return -32768; 279 } else if (x > 32767) { 280 return 32767; 281 } else { 282 return x; 283 } 284 } 285 286 #define FADD(a, b) ((a) + (b)) 287 #define FADDUB(a, b) satub((a) + (b)) 288 #define FADDUW(a, b) satuw((a) + (b)) 289 #define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b)) 290 #define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b)) 291 292 #define FSUB(a, b) ((a) - (b)) 293 #define FSUBUB(a, b) satub((a) - (b)) 294 #define FSUBUW(a, b) satuw((a) - (b)) 295 #define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b)) 296 #define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b)) 297 #define FMINUB(a, b) ((a) < (b)) ? (a) : (b) 298 #define FMINSW(a, b) ((int16_t)(a) < (int16_t)(b)) ? (a) : (b) 299 #define FMAXUB(a, b) ((a) > (b)) ? (a) : (b) 300 #define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b) 301 302 #define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16) 303 #define FMULHUW(a, b) ((a) * (b) >> 16) 304 #define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16) 305 306 #define FAVG(a, b) (((a) + (b) + 1) >> 1) 307 #endif 308 309 SSE_HELPER_W(helper_pmulhuw, FMULHUW) 310 SSE_HELPER_W(helper_pmulhw, FMULHW) 311 312 #if SHIFT == 0 313 void glue(helper_pmulhrw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 314 { 315 d->W(0) = FMULHRW(d->W(0), s->W(0)); 316 d->W(1) = FMULHRW(d->W(1), s->W(1)); 317 d->W(2) = FMULHRW(d->W(2), s->W(2)); 318 d->W(3) = FMULHRW(d->W(3), s->W(3)); 319 } 320 #endif 321 322 SSE_HELPER_B(helper_pavgb, FAVG) 323 SSE_HELPER_W(helper_pavgw, FAVG) 324 325 void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 326 { 327 int i; 328 329 for (i = 0; i < (1 << SHIFT); i++) { 330 d->Q(i) = (uint64_t)s->L(i * 2) * (uint64_t)v->L(i * 2); 331 } 332 } 333 334 void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 335 { 336 int i; 337 338 for (i = 0; i < (2 << SHIFT); i++) { 339 d->L(i) = (int16_t)s->W(2 * i) * (int16_t)v->W(2 * i) + 340 (int16_t)s->W(2 * i + 1) * (int16_t)v->W(2 * i + 1); 341 } 342 } 343 344 #if SHIFT == 0 345 static inline int abs1(int a) 346 { 347 if (a < 0) { 348 return -a; 349 } else { 350 return a; 351 } 352 } 353 #endif 354 void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 355 { 356 int i; 357 358 for (i = 0; i < (1 << SHIFT); i++) { 359 unsigned int val = 0; 360 val += abs1(v->B(8 * i + 0) - s->B(8 * i + 0)); 361 val += abs1(v->B(8 * i + 1) - s->B(8 * i + 1)); 362 val += abs1(v->B(8 * i + 2) - s->B(8 * i + 2)); 363 val += abs1(v->B(8 * i + 3) - s->B(8 * i + 3)); 364 val += abs1(v->B(8 * i + 4) - s->B(8 * i + 4)); 365 val += abs1(v->B(8 * i + 5) - s->B(8 * i + 5)); 366 val += abs1(v->B(8 * i + 6) - s->B(8 * i + 6)); 367 val += abs1(v->B(8 * i + 7) - s->B(8 * i + 7)); 368 d->Q(i) = val; 369 } 370 } 371 372 #if SHIFT < 2 373 void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 374 target_ulong a0) 375 { 376 int i; 377 378 for (i = 0; i < (8 << SHIFT); i++) { 379 if (s->B(i) & 0x80) { 380 cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC()); 381 } 382 } 383 } 384 #endif 385 386 #define SHUFFLE4(F, a, b, offset) do { \ 387 r0 = a->F((order & 3) + offset); \ 388 r1 = a->F(((order >> 2) & 3) + offset); \ 389 r2 = b->F(((order >> 4) & 3) + offset); \ 390 r3 = b->F(((order >> 6) & 3) + offset); \ 391 d->F(offset) = r0; \ 392 d->F(offset + 1) = r1; \ 393 d->F(offset + 2) = r2; \ 394 d->F(offset + 3) = r3; \ 395 } while (0) 396 397 #if SHIFT == 0 398 void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order) 399 { 400 uint16_t r0, r1, r2, r3; 401 402 SHUFFLE4(W, s, s, 0); 403 } 404 #else 405 void glue(helper_shufps, SUFFIX)(Reg *d, Reg *v, Reg *s, int order) 406 { 407 uint32_t r0, r1, r2, r3; 408 int i; 409 410 for (i = 0; i < 2 << SHIFT; i += 4) { 411 SHUFFLE4(L, v, s, i); 412 } 413 } 414 415 void glue(helper_shufpd, SUFFIX)(Reg *d, Reg *v, Reg *s, int order) 416 { 417 uint64_t r0, r1; 418 int i; 419 420 for (i = 0; i < 1 << SHIFT; i += 2) { 421 r0 = v->Q(((order & 1) & 1) + i); 422 r1 = s->Q(((order >> 1) & 1) + i); 423 d->Q(i) = r0; 424 d->Q(i + 1) = r1; 425 order >>= 2; 426 } 427 } 428 429 void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order) 430 { 431 uint32_t r0, r1, r2, r3; 432 int i; 433 434 for (i = 0; i < 2 << SHIFT; i += 4) { 435 SHUFFLE4(L, s, s, i); 436 } 437 } 438 439 void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order) 440 { 441 uint16_t r0, r1, r2, r3; 442 int i, j; 443 444 for (i = 0, j = 1; j < 1 << SHIFT; i += 8, j += 2) { 445 SHUFFLE4(W, s, s, i); 446 d->Q(j) = s->Q(j); 447 } 448 } 449 450 void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order) 451 { 452 uint16_t r0, r1, r2, r3; 453 int i, j; 454 455 for (i = 4, j = 0; j < 1 << SHIFT; i += 8, j += 2) { 456 d->Q(j) = s->Q(j); 457 SHUFFLE4(W, s, s, i); 458 } 459 } 460 #endif 461 462 #if SHIFT >= 1 463 /* FPU ops */ 464 /* XXX: not accurate */ 465 466 #define SSE_HELPER_P(name, F) \ 467 void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env, \ 468 Reg *d, Reg *v, Reg *s) \ 469 { \ 470 int i; \ 471 for (i = 0; i < 2 << SHIFT; i++) { \ 472 d->ZMM_S(i) = F(32, v->ZMM_S(i), s->ZMM_S(i)); \ 473 } \ 474 } \ 475 \ 476 void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env, \ 477 Reg *d, Reg *v, Reg *s) \ 478 { \ 479 int i; \ 480 for (i = 0; i < 1 << SHIFT; i++) { \ 481 d->ZMM_D(i) = F(64, v->ZMM_D(i), s->ZMM_D(i)); \ 482 } \ 483 } 484 485 #if SHIFT == 1 486 487 #define SSE_HELPER_S(name, F) \ 488 SSE_HELPER_P(name, F) \ 489 \ 490 void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s)\ 491 { \ 492 int i; \ 493 d->ZMM_S(0) = F(32, v->ZMM_S(0), s->ZMM_S(0)); \ 494 for (i = 1; i < 2 << SHIFT; i++) { \ 495 d->ZMM_L(i) = v->ZMM_L(i); \ 496 } \ 497 } \ 498 \ 499 void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s)\ 500 { \ 501 int i; \ 502 d->ZMM_D(0) = F(64, v->ZMM_D(0), s->ZMM_D(0)); \ 503 for (i = 1; i < 1 << SHIFT; i++) { \ 504 d->ZMM_Q(i) = v->ZMM_Q(i); \ 505 } \ 506 } 507 508 #else 509 510 #define SSE_HELPER_S(name, F) SSE_HELPER_P(name, F) 511 512 #endif 513 514 #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status) 515 #define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status) 516 #define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status) 517 #define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status) 518 519 /* Note that the choice of comparison op here is important to get the 520 * special cases right: for min and max Intel specifies that (-0,0), 521 * (NaN, anything) and (anything, NaN) return the second argument. 522 */ 523 #define FPU_MIN(size, a, b) \ 524 (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b)) 525 #define FPU_MAX(size, a, b) \ 526 (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b)) 527 528 SSE_HELPER_S(add, FPU_ADD) 529 SSE_HELPER_S(sub, FPU_SUB) 530 SSE_HELPER_S(mul, FPU_MUL) 531 SSE_HELPER_S(div, FPU_DIV) 532 SSE_HELPER_S(min, FPU_MIN) 533 SSE_HELPER_S(max, FPU_MAX) 534 535 void glue(helper_sqrtps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 536 { 537 int i; 538 for (i = 0; i < 2 << SHIFT; i++) { 539 d->ZMM_S(i) = float32_sqrt(s->ZMM_S(i), &env->sse_status); 540 } 541 } 542 543 void glue(helper_sqrtpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 544 { 545 int i; 546 for (i = 0; i < 1 << SHIFT; i++) { 547 d->ZMM_D(i) = float64_sqrt(s->ZMM_D(i), &env->sse_status); 548 } 549 } 550 551 #if SHIFT == 1 552 void helper_sqrtss(CPUX86State *env, Reg *d, Reg *v, Reg *s) 553 { 554 int i; 555 d->ZMM_S(0) = float32_sqrt(s->ZMM_S(0), &env->sse_status); 556 for (i = 1; i < 2 << SHIFT; i++) { 557 d->ZMM_L(i) = v->ZMM_L(i); 558 } 559 } 560 561 void helper_sqrtsd(CPUX86State *env, Reg *d, Reg *v, Reg *s) 562 { 563 int i; 564 d->ZMM_D(0) = float64_sqrt(s->ZMM_D(0), &env->sse_status); 565 for (i = 1; i < 1 << SHIFT; i++) { 566 d->ZMM_Q(i) = v->ZMM_Q(i); 567 } 568 } 569 #endif 570 571 /* float to float conversions */ 572 void glue(helper_cvtps2pd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 573 { 574 int i; 575 for (i = 1 << SHIFT; --i >= 0; ) { 576 d->ZMM_D(i) = float32_to_float64(s->ZMM_S(i), &env->sse_status); 577 } 578 } 579 580 void glue(helper_cvtpd2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 581 { 582 int i; 583 for (i = 0; i < 1 << SHIFT; i++) { 584 d->ZMM_S(i) = float64_to_float32(s->ZMM_D(i), &env->sse_status); 585 } 586 for (i >>= 1; i < 1 << SHIFT; i++) { 587 d->Q(i) = 0; 588 } 589 } 590 591 #if SHIFT >= 1 592 void glue(helper_cvtph2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 593 { 594 int i; 595 596 for (i = 2 << SHIFT; --i >= 0; ) { 597 d->ZMM_S(i) = float16_to_float32(s->ZMM_H(i), true, &env->sse_status); 598 } 599 } 600 601 void glue(helper_cvtps2ph, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, int mode) 602 { 603 int i; 604 FloatRoundMode prev_rounding_mode = env->sse_status.float_rounding_mode; 605 if (!(mode & (1 << 2))) { 606 set_x86_rounding_mode(mode & 3, &env->sse_status); 607 } 608 609 for (i = 0; i < 2 << SHIFT; i++) { 610 d->ZMM_H(i) = float32_to_float16(s->ZMM_S(i), true, &env->sse_status); 611 } 612 for (i >>= 2; i < 1 << SHIFT; i++) { 613 d->Q(i) = 0; 614 } 615 616 env->sse_status.float_rounding_mode = prev_rounding_mode; 617 } 618 #endif 619 620 #if SHIFT == 1 621 void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *v, Reg *s) 622 { 623 int i; 624 d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status); 625 for (i = 1; i < 1 << SHIFT; i++) { 626 d->ZMM_Q(i) = v->ZMM_Q(i); 627 } 628 } 629 630 void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *v, Reg *s) 631 { 632 int i; 633 d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status); 634 for (i = 1; i < 2 << SHIFT; i++) { 635 d->ZMM_L(i) = v->ZMM_L(i); 636 } 637 } 638 #endif 639 640 /* integer to float */ 641 void glue(helper_cvtdq2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 642 { 643 int i; 644 for (i = 0; i < 2 << SHIFT; i++) { 645 d->ZMM_S(i) = int32_to_float32(s->ZMM_L(i), &env->sse_status); 646 } 647 } 648 649 void glue(helper_cvtdq2pd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 650 { 651 int i; 652 for (i = 1 << SHIFT; --i >= 0; ) { 653 int32_t l = s->ZMM_L(i); 654 d->ZMM_D(i) = int32_to_float64(l, &env->sse_status); 655 } 656 } 657 658 #if SHIFT == 1 659 void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s) 660 { 661 d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status); 662 d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status); 663 } 664 665 void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s) 666 { 667 d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status); 668 d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status); 669 } 670 671 void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val) 672 { 673 d->ZMM_S(0) = int32_to_float32(val, &env->sse_status); 674 } 675 676 void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val) 677 { 678 d->ZMM_D(0) = int32_to_float64(val, &env->sse_status); 679 } 680 681 #ifdef TARGET_X86_64 682 void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val) 683 { 684 d->ZMM_S(0) = int64_to_float32(val, &env->sse_status); 685 } 686 687 void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val) 688 { 689 d->ZMM_D(0) = int64_to_float64(val, &env->sse_status); 690 } 691 #endif 692 693 #endif 694 695 /* float to integer */ 696 697 #if SHIFT == 1 698 /* 699 * x86 mandates that we return the indefinite integer value for the result 700 * of any float-to-integer conversion that raises the 'invalid' exception. 701 * Wrap the softfloat functions to get this behaviour. 702 */ 703 #define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE) \ 704 static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s) \ 705 { \ 706 int oldflags, newflags; \ 707 RETTYPE r; \ 708 \ 709 oldflags = get_float_exception_flags(s); \ 710 set_float_exception_flags(0, s); \ 711 r = FN(a, s); \ 712 newflags = get_float_exception_flags(s); \ 713 if (newflags & float_flag_invalid) { \ 714 r = INDEFVALUE; \ 715 } \ 716 set_float_exception_flags(newflags | oldflags, s); \ 717 return r; \ 718 } 719 720 WRAP_FLOATCONV(int32_t, float32_to_int32, float32, INT32_MIN) 721 WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero, float32, INT32_MIN) 722 WRAP_FLOATCONV(int32_t, float64_to_int32, float64, INT32_MIN) 723 WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero, float64, INT32_MIN) 724 WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN) 725 WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN) 726 WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN) 727 WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN) 728 #endif 729 730 void glue(helper_cvtps2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) 731 { 732 int i; 733 for (i = 0; i < 2 << SHIFT; i++) { 734 d->ZMM_L(i) = x86_float32_to_int32(s->ZMM_S(i), &env->sse_status); 735 } 736 } 737 738 void glue(helper_cvtpd2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) 739 { 740 int i; 741 for (i = 0; i < 1 << SHIFT; i++) { 742 d->ZMM_L(i) = x86_float64_to_int32(s->ZMM_D(i), &env->sse_status); 743 } 744 for (i >>= 1; i < 1 << SHIFT; i++) { 745 d->Q(i) = 0; 746 } 747 } 748 749 #if SHIFT == 1 750 void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) 751 { 752 d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); 753 d->MMX_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status); 754 } 755 756 void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) 757 { 758 d->MMX_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); 759 d->MMX_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status); 760 } 761 762 int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s) 763 { 764 return x86_float32_to_int32(s->ZMM_S(0), &env->sse_status); 765 } 766 767 int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s) 768 { 769 return x86_float64_to_int32(s->ZMM_D(0), &env->sse_status); 770 } 771 772 #ifdef TARGET_X86_64 773 int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s) 774 { 775 return x86_float32_to_int64(s->ZMM_S(0), &env->sse_status); 776 } 777 778 int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s) 779 { 780 return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status); 781 } 782 #endif 783 #endif 784 785 /* float to integer truncated */ 786 void glue(helper_cvttps2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) 787 { 788 int i; 789 for (i = 0; i < 2 << SHIFT; i++) { 790 d->ZMM_L(i) = x86_float32_to_int32_round_to_zero(s->ZMM_S(i), 791 &env->sse_status); 792 } 793 } 794 795 void glue(helper_cvttpd2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) 796 { 797 int i; 798 for (i = 0; i < 1 << SHIFT; i++) { 799 d->ZMM_L(i) = x86_float64_to_int32_round_to_zero(s->ZMM_D(i), 800 &env->sse_status); 801 } 802 for (i >>= 1; i < 1 << SHIFT; i++) { 803 d->Q(i) = 0; 804 } 805 } 806 807 #if SHIFT == 1 808 void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) 809 { 810 d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); 811 d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status); 812 } 813 814 void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s) 815 { 816 d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); 817 d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status); 818 } 819 820 int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s) 821 { 822 return x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status); 823 } 824 825 int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s) 826 { 827 return x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status); 828 } 829 830 #ifdef TARGET_X86_64 831 int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s) 832 { 833 return x86_float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status); 834 } 835 836 int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s) 837 { 838 return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status); 839 } 840 #endif 841 #endif 842 843 void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) 844 { 845 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 846 int i; 847 for (i = 0; i < 2 << SHIFT; i++) { 848 d->ZMM_S(i) = float32_div(float32_one, 849 float32_sqrt(s->ZMM_S(i), &env->sse_status), 850 &env->sse_status); 851 } 852 set_float_exception_flags(old_flags, &env->sse_status); 853 } 854 855 #if SHIFT == 1 856 void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s) 857 { 858 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 859 int i; 860 d->ZMM_S(0) = float32_div(float32_one, 861 float32_sqrt(s->ZMM_S(0), &env->sse_status), 862 &env->sse_status); 863 set_float_exception_flags(old_flags, &env->sse_status); 864 for (i = 1; i < 2 << SHIFT; i++) { 865 d->ZMM_L(i) = v->ZMM_L(i); 866 } 867 } 868 #endif 869 870 void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s) 871 { 872 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 873 int i; 874 for (i = 0; i < 2 << SHIFT; i++) { 875 d->ZMM_S(i) = float32_div(float32_one, s->ZMM_S(i), &env->sse_status); 876 } 877 set_float_exception_flags(old_flags, &env->sse_status); 878 } 879 880 #if SHIFT == 1 881 void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s) 882 { 883 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 884 int i; 885 d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status); 886 for (i = 1; i < 2 << SHIFT; i++) { 887 d->ZMM_L(i) = v->ZMM_L(i); 888 } 889 set_float_exception_flags(old_flags, &env->sse_status); 890 } 891 #endif 892 893 #if SHIFT == 1 894 static inline uint64_t helper_extrq(uint64_t src, int shift, int len) 895 { 896 uint64_t mask; 897 898 if (len == 0) { 899 mask = ~0LL; 900 } else { 901 mask = (1ULL << len) - 1; 902 } 903 return (src >> shift) & mask; 904 } 905 906 void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) 907 { 908 d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1) & 63, s->ZMM_B(0) & 63); 909 } 910 911 void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length) 912 { 913 d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length); 914 } 915 916 static inline uint64_t helper_insertq(uint64_t dest, uint64_t src, int shift, int len) 917 { 918 uint64_t mask; 919 920 if (len == 0) { 921 mask = ~0ULL; 922 } else { 923 mask = (1ULL << len) - 1; 924 } 925 return (dest & ~(mask << shift)) | ((src & mask) << shift); 926 } 927 928 void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) 929 { 930 d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), s->ZMM_B(9) & 63, s->ZMM_B(8) & 63); 931 } 932 933 void helper_insertq_i(CPUX86State *env, ZMMReg *d, ZMMReg *s, int index, int length) 934 { 935 d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), index, length); 936 } 937 #endif 938 939 #define SSE_HELPER_HPS(name, F) \ 940 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 941 { \ 942 float32 r[2 << SHIFT]; \ 943 int i, j, k; \ 944 for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) { \ 945 for (i = j = 0; j < 4; i++, j += 2) { \ 946 r[i + k] = F(v->ZMM_S(j + k), v->ZMM_S(j + k + 1), &env->sse_status); \ 947 } \ 948 for (j = 0; j < 4; i++, j += 2) { \ 949 r[i + k] = F(s->ZMM_S(j + k), s->ZMM_S(j + k + 1), &env->sse_status); \ 950 } \ 951 } \ 952 for (i = 0; i < 2 << SHIFT; i++) { \ 953 d->ZMM_S(i) = r[i]; \ 954 } \ 955 } 956 957 SSE_HELPER_HPS(haddps, float32_add) 958 SSE_HELPER_HPS(hsubps, float32_sub) 959 960 #define SSE_HELPER_HPD(name, F) \ 961 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 962 { \ 963 float64 r[1 << SHIFT]; \ 964 int i, j, k; \ 965 for (k = 0; k < 1 << SHIFT; k += LANE_WIDTH / 8) { \ 966 for (i = j = 0; j < 2; i++, j += 2) { \ 967 r[i + k] = F(v->ZMM_D(j + k), v->ZMM_D(j + k + 1), &env->sse_status); \ 968 } \ 969 for (j = 0; j < 2; i++, j += 2) { \ 970 r[i + k] = F(s->ZMM_D(j + k), s->ZMM_D(j + k + 1), &env->sse_status); \ 971 } \ 972 } \ 973 for (i = 0; i < 1 << SHIFT; i++) { \ 974 d->ZMM_D(i) = r[i]; \ 975 } \ 976 } 977 978 SSE_HELPER_HPD(haddpd, float64_add) 979 SSE_HELPER_HPD(hsubpd, float64_sub) 980 981 void glue(helper_addsubps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 982 { 983 int i; 984 for (i = 0; i < 2 << SHIFT; i += 2) { 985 d->ZMM_S(i) = float32_sub(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status); 986 d->ZMM_S(i+1) = float32_add(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status); 987 } 988 } 989 990 void glue(helper_addsubpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 991 { 992 int i; 993 for (i = 0; i < 1 << SHIFT; i += 2) { 994 d->ZMM_D(i) = float64_sub(v->ZMM_D(i), s->ZMM_D(i), &env->sse_status); 995 d->ZMM_D(i+1) = float64_add(v->ZMM_D(i+1), s->ZMM_D(i+1), &env->sse_status); 996 } 997 } 998 999 #define SSE_HELPER_CMP_P(name, F, C) \ 1000 void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env, \ 1001 Reg *d, Reg *v, Reg *s) \ 1002 { \ 1003 int i; \ 1004 for (i = 0; i < 2 << SHIFT; i++) { \ 1005 d->ZMM_L(i) = C(F(32, v->ZMM_S(i), s->ZMM_S(i))) ? -1 : 0; \ 1006 } \ 1007 } \ 1008 \ 1009 void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env, \ 1010 Reg *d, Reg *v, Reg *s) \ 1011 { \ 1012 int i; \ 1013 for (i = 0; i < 1 << SHIFT; i++) { \ 1014 d->ZMM_Q(i) = C(F(64, v->ZMM_D(i), s->ZMM_D(i))) ? -1 : 0; \ 1015 } \ 1016 } 1017 1018 #if SHIFT == 1 1019 #define SSE_HELPER_CMP(name, F, C) \ 1020 SSE_HELPER_CMP_P(name, F, C) \ 1021 void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 1022 { \ 1023 int i; \ 1024 d->ZMM_L(0) = C(F(32, v->ZMM_S(0), s->ZMM_S(0))) ? -1 : 0; \ 1025 for (i = 1; i < 2 << SHIFT; i++) { \ 1026 d->ZMM_L(i) = v->ZMM_L(i); \ 1027 } \ 1028 } \ 1029 \ 1030 void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 1031 { \ 1032 int i; \ 1033 d->ZMM_Q(0) = C(F(64, v->ZMM_D(0), s->ZMM_D(0))) ? -1 : 0; \ 1034 for (i = 1; i < 1 << SHIFT; i++) { \ 1035 d->ZMM_Q(i) = v->ZMM_Q(i); \ 1036 } \ 1037 } 1038 1039 static inline bool FPU_EQU(FloatRelation x) 1040 { 1041 return (x == float_relation_equal || x == float_relation_unordered); 1042 } 1043 static inline bool FPU_GE(FloatRelation x) 1044 { 1045 return (x == float_relation_equal || x == float_relation_greater); 1046 } 1047 #define FPU_EQ(x) (x == float_relation_equal) 1048 #define FPU_LT(x) (x == float_relation_less) 1049 #define FPU_LE(x) (x <= float_relation_equal) 1050 #define FPU_GT(x) (x == float_relation_greater) 1051 #define FPU_UNORD(x) (x == float_relation_unordered) 1052 /* We must make sure we evaluate the argument in case it is a signalling NAN */ 1053 #define FPU_FALSE(x) (x == float_relation_equal && 0) 1054 1055 #define FPU_CMPQ(size, a, b) \ 1056 float ## size ## _compare_quiet(a, b, &env->sse_status) 1057 #define FPU_CMPS(size, a, b) \ 1058 float ## size ## _compare(a, b, &env->sse_status) 1059 1060 #else 1061 #define SSE_HELPER_CMP(name, F, C) SSE_HELPER_CMP_P(name, F, C) 1062 #endif 1063 1064 SSE_HELPER_CMP(cmpeq, FPU_CMPQ, FPU_EQ) 1065 SSE_HELPER_CMP(cmplt, FPU_CMPS, FPU_LT) 1066 SSE_HELPER_CMP(cmple, FPU_CMPS, FPU_LE) 1067 SSE_HELPER_CMP(cmpunord, FPU_CMPQ, FPU_UNORD) 1068 SSE_HELPER_CMP(cmpneq, FPU_CMPQ, !FPU_EQ) 1069 SSE_HELPER_CMP(cmpnlt, FPU_CMPS, !FPU_LT) 1070 SSE_HELPER_CMP(cmpnle, FPU_CMPS, !FPU_LE) 1071 SSE_HELPER_CMP(cmpord, FPU_CMPQ, !FPU_UNORD) 1072 1073 SSE_HELPER_CMP(cmpequ, FPU_CMPQ, FPU_EQU) 1074 SSE_HELPER_CMP(cmpnge, FPU_CMPS, !FPU_GE) 1075 SSE_HELPER_CMP(cmpngt, FPU_CMPS, !FPU_GT) 1076 SSE_HELPER_CMP(cmpfalse, FPU_CMPQ, FPU_FALSE) 1077 SSE_HELPER_CMP(cmpnequ, FPU_CMPQ, !FPU_EQU) 1078 SSE_HELPER_CMP(cmpge, FPU_CMPS, FPU_GE) 1079 SSE_HELPER_CMP(cmpgt, FPU_CMPS, FPU_GT) 1080 SSE_HELPER_CMP(cmptrue, FPU_CMPQ, !FPU_FALSE) 1081 1082 SSE_HELPER_CMP(cmpeqs, FPU_CMPS, FPU_EQ) 1083 SSE_HELPER_CMP(cmpltq, FPU_CMPQ, FPU_LT) 1084 SSE_HELPER_CMP(cmpleq, FPU_CMPQ, FPU_LE) 1085 SSE_HELPER_CMP(cmpunords, FPU_CMPS, FPU_UNORD) 1086 SSE_HELPER_CMP(cmpneqq, FPU_CMPS, !FPU_EQ) 1087 SSE_HELPER_CMP(cmpnltq, FPU_CMPQ, !FPU_LT) 1088 SSE_HELPER_CMP(cmpnleq, FPU_CMPQ, !FPU_LE) 1089 SSE_HELPER_CMP(cmpords, FPU_CMPS, !FPU_UNORD) 1090 1091 SSE_HELPER_CMP(cmpequs, FPU_CMPS, FPU_EQU) 1092 SSE_HELPER_CMP(cmpngeq, FPU_CMPQ, !FPU_GE) 1093 SSE_HELPER_CMP(cmpngtq, FPU_CMPQ, !FPU_GT) 1094 SSE_HELPER_CMP(cmpfalses, FPU_CMPS, FPU_FALSE) 1095 SSE_HELPER_CMP(cmpnequs, FPU_CMPS, !FPU_EQU) 1096 SSE_HELPER_CMP(cmpgeq, FPU_CMPQ, FPU_GE) 1097 SSE_HELPER_CMP(cmpgtq, FPU_CMPQ, FPU_GT) 1098 SSE_HELPER_CMP(cmptrues, FPU_CMPS, !FPU_FALSE) 1099 1100 #undef SSE_HELPER_CMP 1101 1102 #if SHIFT == 1 1103 static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; 1104 1105 void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s) 1106 { 1107 FloatRelation ret; 1108 float32 s0, s1; 1109 1110 s0 = d->ZMM_S(0); 1111 s1 = s->ZMM_S(0); 1112 ret = float32_compare_quiet(s0, s1, &env->sse_status); 1113 CC_SRC = comis_eflags[ret + 1]; 1114 } 1115 1116 void helper_comiss(CPUX86State *env, Reg *d, Reg *s) 1117 { 1118 FloatRelation ret; 1119 float32 s0, s1; 1120 1121 s0 = d->ZMM_S(0); 1122 s1 = s->ZMM_S(0); 1123 ret = float32_compare(s0, s1, &env->sse_status); 1124 CC_SRC = comis_eflags[ret + 1]; 1125 } 1126 1127 void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s) 1128 { 1129 FloatRelation ret; 1130 float64 d0, d1; 1131 1132 d0 = d->ZMM_D(0); 1133 d1 = s->ZMM_D(0); 1134 ret = float64_compare_quiet(d0, d1, &env->sse_status); 1135 CC_SRC = comis_eflags[ret + 1]; 1136 } 1137 1138 void helper_comisd(CPUX86State *env, Reg *d, Reg *s) 1139 { 1140 FloatRelation ret; 1141 float64 d0, d1; 1142 1143 d0 = d->ZMM_D(0); 1144 d1 = s->ZMM_D(0); 1145 ret = float64_compare(d0, d1, &env->sse_status); 1146 CC_SRC = comis_eflags[ret + 1]; 1147 } 1148 #endif 1149 1150 uint32_t glue(helper_movmskps, SUFFIX)(CPUX86State *env, Reg *s) 1151 { 1152 uint32_t mask; 1153 int i; 1154 1155 mask = 0; 1156 for (i = 0; i < 2 << SHIFT; i++) { 1157 mask |= (s->ZMM_L(i) >> (31 - i)) & (1 << i); 1158 } 1159 return mask; 1160 } 1161 1162 uint32_t glue(helper_movmskpd, SUFFIX)(CPUX86State *env, Reg *s) 1163 { 1164 uint32_t mask; 1165 int i; 1166 1167 mask = 0; 1168 for (i = 0; i < 1 << SHIFT; i++) { 1169 mask |= (s->ZMM_Q(i) >> (63 - i)) & (1 << i); 1170 } 1171 return mask; 1172 } 1173 1174 #endif 1175 1176 #define PACK_HELPER_B(name, F) \ 1177 void glue(helper_pack ## name, SUFFIX)(CPUX86State *env, \ 1178 Reg *d, Reg *v, Reg *s) \ 1179 { \ 1180 uint8_t r[PACK_WIDTH * 2]; \ 1181 int j, k; \ 1182 for (j = 0; j < 4 << SHIFT; j += PACK_WIDTH) { \ 1183 for (k = 0; k < PACK_WIDTH; k++) { \ 1184 r[k] = F((int16_t)v->W(j + k)); \ 1185 } \ 1186 for (k = 0; k < PACK_WIDTH; k++) { \ 1187 r[PACK_WIDTH + k] = F((int16_t)s->W(j + k)); \ 1188 } \ 1189 for (k = 0; k < PACK_WIDTH * 2; k++) { \ 1190 d->B(2 * j + k) = r[k]; \ 1191 } \ 1192 } \ 1193 } 1194 1195 PACK_HELPER_B(sswb, satsb) 1196 PACK_HELPER_B(uswb, satub) 1197 1198 void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 1199 { 1200 uint16_t r[PACK_WIDTH]; 1201 int j, k; 1202 1203 for (j = 0; j < 2 << SHIFT; j += PACK_WIDTH / 2) { 1204 for (k = 0; k < PACK_WIDTH / 2; k++) { 1205 r[k] = satsw(v->L(j + k)); 1206 } 1207 for (k = 0; k < PACK_WIDTH / 2; k++) { 1208 r[PACK_WIDTH / 2 + k] = satsw(s->L(j + k)); 1209 } 1210 for (k = 0; k < PACK_WIDTH; k++) { 1211 d->W(2 * j + k) = r[k]; 1212 } 1213 } 1214 } 1215 1216 #define UNPCK_OP(base_name, base) \ 1217 \ 1218 void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\ 1219 Reg *d, Reg *v, Reg *s) \ 1220 { \ 1221 uint8_t r[PACK_WIDTH * 2]; \ 1222 int j, i; \ 1223 \ 1224 for (j = 0; j < 8 << SHIFT; ) { \ 1225 int k = j + base * PACK_WIDTH; \ 1226 for (i = 0; i < PACK_WIDTH; i++) { \ 1227 r[2 * i] = v->B(k + i); \ 1228 r[2 * i + 1] = s->B(k + i); \ 1229 } \ 1230 for (i = 0; i < PACK_WIDTH * 2; i++, j++) { \ 1231 d->B(j) = r[i]; \ 1232 } \ 1233 } \ 1234 } \ 1235 \ 1236 void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\ 1237 Reg *d, Reg *v, Reg *s) \ 1238 { \ 1239 uint16_t r[PACK_WIDTH]; \ 1240 int j, i; \ 1241 \ 1242 for (j = 0; j < 4 << SHIFT; ) { \ 1243 int k = j + base * PACK_WIDTH / 2; \ 1244 for (i = 0; i < PACK_WIDTH / 2; i++) { \ 1245 r[2 * i] = v->W(k + i); \ 1246 r[2 * i + 1] = s->W(k + i); \ 1247 } \ 1248 for (i = 0; i < PACK_WIDTH; i++, j++) { \ 1249 d->W(j) = r[i]; \ 1250 } \ 1251 } \ 1252 } \ 1253 \ 1254 void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\ 1255 Reg *d, Reg *v, Reg *s) \ 1256 { \ 1257 uint32_t r[PACK_WIDTH / 2]; \ 1258 int j, i; \ 1259 \ 1260 for (j = 0; j < 2 << SHIFT; ) { \ 1261 int k = j + base * PACK_WIDTH / 4; \ 1262 for (i = 0; i < PACK_WIDTH / 4; i++) { \ 1263 r[2 * i] = v->L(k + i); \ 1264 r[2 * i + 1] = s->L(k + i); \ 1265 } \ 1266 for (i = 0; i < PACK_WIDTH / 2; i++, j++) { \ 1267 d->L(j) = r[i]; \ 1268 } \ 1269 } \ 1270 } \ 1271 \ 1272 XMM_ONLY( \ 1273 void glue(helper_punpck ## base_name ## qdq, SUFFIX)( \ 1274 CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 1275 { \ 1276 uint64_t r[2]; \ 1277 int i; \ 1278 \ 1279 for (i = 0; i < 1 << SHIFT; i += 2) { \ 1280 r[0] = v->Q(base + i); \ 1281 r[1] = s->Q(base + i); \ 1282 d->Q(i) = r[0]; \ 1283 d->Q(i + 1) = r[1]; \ 1284 } \ 1285 } \ 1286 ) 1287 1288 UNPCK_OP(l, 0) 1289 UNPCK_OP(h, 1) 1290 1291 #undef PACK_WIDTH 1292 #undef PACK_HELPER_B 1293 #undef UNPCK_OP 1294 1295 1296 /* 3DNow! float ops */ 1297 #if SHIFT == 0 1298 void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s) 1299 { 1300 d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status); 1301 d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status); 1302 } 1303 1304 void helper_pi2fw(CPUX86State *env, MMXReg *d, MMXReg *s) 1305 { 1306 d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status); 1307 d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status); 1308 } 1309 1310 void helper_pf2id(CPUX86State *env, MMXReg *d, MMXReg *s) 1311 { 1312 d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status); 1313 d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status); 1314 } 1315 1316 void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s) 1317 { 1318 d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), 1319 &env->mmx_status)); 1320 d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), 1321 &env->mmx_status)); 1322 } 1323 1324 void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s) 1325 { 1326 float32 r; 1327 1328 r = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); 1329 d->MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); 1330 d->MMX_S(0) = r; 1331 } 1332 1333 void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s) 1334 { 1335 d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); 1336 d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); 1337 } 1338 1339 void helper_pfcmpeq(CPUX86State *env, MMXReg *d, MMXReg *s) 1340 { 1341 d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0), 1342 &env->mmx_status) ? -1 : 0; 1343 d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1), 1344 &env->mmx_status) ? -1 : 0; 1345 } 1346 1347 void helper_pfcmpge(CPUX86State *env, MMXReg *d, MMXReg *s) 1348 { 1349 d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), 1350 &env->mmx_status) ? -1 : 0; 1351 d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), 1352 &env->mmx_status) ? -1 : 0; 1353 } 1354 1355 void helper_pfcmpgt(CPUX86State *env, MMXReg *d, MMXReg *s) 1356 { 1357 d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), 1358 &env->mmx_status) ? -1 : 0; 1359 d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), 1360 &env->mmx_status) ? -1 : 0; 1361 } 1362 1363 void helper_pfmax(CPUX86State *env, MMXReg *d, MMXReg *s) 1364 { 1365 if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) { 1366 d->MMX_S(0) = s->MMX_S(0); 1367 } 1368 if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) { 1369 d->MMX_S(1) = s->MMX_S(1); 1370 } 1371 } 1372 1373 void helper_pfmin(CPUX86State *env, MMXReg *d, MMXReg *s) 1374 { 1375 if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) { 1376 d->MMX_S(0) = s->MMX_S(0); 1377 } 1378 if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) { 1379 d->MMX_S(1) = s->MMX_S(1); 1380 } 1381 } 1382 1383 void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s) 1384 { 1385 d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); 1386 d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); 1387 } 1388 1389 void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s) 1390 { 1391 float32 r; 1392 1393 r = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); 1394 d->MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); 1395 d->MMX_S(0) = r; 1396 } 1397 1398 void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s) 1399 { 1400 float32 r; 1401 1402 r = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status); 1403 d->MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status); 1404 d->MMX_S(0) = r; 1405 } 1406 1407 void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s) 1408 { 1409 d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status); 1410 d->MMX_S(1) = d->MMX_S(0); 1411 } 1412 1413 void helper_pfrsqrt(CPUX86State *env, MMXReg *d, MMXReg *s) 1414 { 1415 d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff; 1416 d->MMX_S(1) = float32_div(float32_one, 1417 float32_sqrt(d->MMX_S(1), &env->mmx_status), 1418 &env->mmx_status); 1419 d->MMX_L(1) |= s->MMX_L(0) & 0x80000000; 1420 d->MMX_L(0) = d->MMX_L(1); 1421 } 1422 1423 void helper_pfsub(CPUX86State *env, MMXReg *d, MMXReg *s) 1424 { 1425 d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status); 1426 d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status); 1427 } 1428 1429 void helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s) 1430 { 1431 d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status); 1432 d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status); 1433 } 1434 1435 void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s) 1436 { 1437 uint32_t r; 1438 1439 r = s->MMX_L(0); 1440 d->MMX_L(0) = s->MMX_L(1); 1441 d->MMX_L(1) = r; 1442 } 1443 #endif 1444 1445 /* SSSE3 op helpers */ 1446 void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 1447 { 1448 int i; 1449 #if SHIFT == 0 1450 uint8_t r[8]; 1451 1452 for (i = 0; i < 8; i++) { 1453 r[i] = (s->B(i) & 0x80) ? 0 : (v->B(s->B(i) & 7)); 1454 } 1455 for (i = 0; i < 8; i++) { 1456 d->B(i) = r[i]; 1457 } 1458 #else 1459 uint8_t r[8 << SHIFT]; 1460 1461 for (i = 0; i < 8 << SHIFT; i++) { 1462 int j = i & ~0xf; 1463 r[i] = (s->B(i) & 0x80) ? 0 : v->B(j | (s->B(i) & 0xf)); 1464 } 1465 for (i = 0; i < 8 << SHIFT; i++) { 1466 d->B(i) = r[i]; 1467 } 1468 #endif 1469 } 1470 1471 #define SSE_HELPER_HW(name, F) \ 1472 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 1473 { \ 1474 uint16_t r[4 << SHIFT]; \ 1475 int i, j, k; \ 1476 for (k = 0; k < 4 << SHIFT; k += LANE_WIDTH / 2) { \ 1477 for (i = j = 0; j < LANE_WIDTH / 2; i++, j += 2) { \ 1478 r[i + k] = F(v->W(j + k), v->W(j + k + 1)); \ 1479 } \ 1480 for (j = 0; j < LANE_WIDTH / 2; i++, j += 2) { \ 1481 r[i + k] = F(s->W(j + k), s->W(j + k + 1)); \ 1482 } \ 1483 } \ 1484 for (i = 0; i < 4 << SHIFT; i++) { \ 1485 d->W(i) = r[i]; \ 1486 } \ 1487 } 1488 1489 #define SSE_HELPER_HL(name, F) \ 1490 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 1491 { \ 1492 uint32_t r[2 << SHIFT]; \ 1493 int i, j, k; \ 1494 for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) { \ 1495 for (i = j = 0; j < LANE_WIDTH / 4; i++, j += 2) { \ 1496 r[i + k] = F(v->L(j + k), v->L(j + k + 1)); \ 1497 } \ 1498 for (j = 0; j < LANE_WIDTH / 4; i++, j += 2) { \ 1499 r[i + k] = F(s->L(j + k), s->L(j + k + 1)); \ 1500 } \ 1501 } \ 1502 for (i = 0; i < 2 << SHIFT; i++) { \ 1503 d->L(i) = r[i]; \ 1504 } \ 1505 } 1506 1507 SSE_HELPER_HW(phaddw, FADD) 1508 SSE_HELPER_HW(phsubw, FSUB) 1509 SSE_HELPER_HW(phaddsw, FADDSW) 1510 SSE_HELPER_HW(phsubsw, FSUBSW) 1511 SSE_HELPER_HL(phaddd, FADD) 1512 SSE_HELPER_HL(phsubd, FSUB) 1513 1514 #undef SSE_HELPER_HW 1515 #undef SSE_HELPER_HL 1516 1517 void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 1518 { 1519 int i; 1520 for (i = 0; i < 4 << SHIFT; i++) { 1521 d->W(i) = satsw((int8_t)s->B(i * 2) * (uint8_t)v->B(i * 2) + 1522 (int8_t)s->B(i * 2 + 1) * (uint8_t)v->B(i * 2 + 1)); 1523 } 1524 } 1525 1526 #define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15) 1527 SSE_HELPER_W(helper_pmulhrsw, FMULHRSW) 1528 1529 #define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d) 1530 #define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d) 1531 #define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d) 1532 SSE_HELPER_B(helper_psignb, FSIGNB) 1533 SSE_HELPER_W(helper_psignw, FSIGNW) 1534 SSE_HELPER_L(helper_psignd, FSIGNL) 1535 1536 void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 1537 uint32_t imm) 1538 { 1539 int i; 1540 1541 /* XXX could be checked during translation */ 1542 if (imm >= (SHIFT ? 32 : 16)) { 1543 for (i = 0; i < (1 << SHIFT); i++) { 1544 d->Q(i) = 0; 1545 } 1546 } else { 1547 int shift = imm * 8; 1548 #define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0) 1549 #if SHIFT == 0 1550 d->Q(0) = SHR(s->Q(0), shift - 0) | 1551 SHR(v->Q(0), shift - 64); 1552 #else 1553 for (i = 0; i < (1 << SHIFT); i += 2) { 1554 uint64_t r0, r1; 1555 1556 r0 = SHR(s->Q(i), shift - 0) | 1557 SHR(s->Q(i + 1), shift - 64) | 1558 SHR(v->Q(i), shift - 128) | 1559 SHR(v->Q(i + 1), shift - 192); 1560 r1 = SHR(s->Q(i), shift + 64) | 1561 SHR(s->Q(i + 1), shift - 0) | 1562 SHR(v->Q(i), shift - 64) | 1563 SHR(v->Q(i + 1), shift - 128); 1564 d->Q(i) = r0; 1565 d->Q(i + 1) = r1; 1566 } 1567 #endif 1568 #undef SHR 1569 } 1570 } 1571 1572 #if SHIFT >= 1 1573 1574 #define SSE_HELPER_V(name, elem, num, F) \ 1575 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, \ 1576 Reg *m) \ 1577 { \ 1578 int i; \ 1579 for (i = 0; i < num; i++) { \ 1580 d->elem(i) = F(v->elem(i), s->elem(i), m->elem(i)); \ 1581 } \ 1582 } 1583 1584 #define SSE_HELPER_I(name, elem, num, F) \ 1585 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, \ 1586 uint32_t imm) \ 1587 { \ 1588 int i; \ 1589 for (i = 0; i < num; i++) { \ 1590 int j = i & 7; \ 1591 d->elem(i) = F(v->elem(i), s->elem(i), (imm >> j) & 1); \ 1592 } \ 1593 } 1594 1595 /* SSE4.1 op helpers */ 1596 #define FBLENDVB(v, s, m) ((m & 0x80) ? s : v) 1597 #define FBLENDVPS(v, s, m) ((m & 0x80000000) ? s : v) 1598 #define FBLENDVPD(v, s, m) ((m & 0x8000000000000000LL) ? s : v) 1599 SSE_HELPER_V(helper_pblendvb, B, 8 << SHIFT, FBLENDVB) 1600 SSE_HELPER_V(helper_blendvps, L, 2 << SHIFT, FBLENDVPS) 1601 SSE_HELPER_V(helper_blendvpd, Q, 1 << SHIFT, FBLENDVPD) 1602 1603 void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 1604 { 1605 uint64_t zf = 0, cf = 0; 1606 int i; 1607 1608 for (i = 0; i < 1 << SHIFT; i++) { 1609 zf |= (s->Q(i) & d->Q(i)); 1610 cf |= (s->Q(i) & ~d->Q(i)); 1611 } 1612 CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C); 1613 } 1614 1615 #define FMOVSLDUP(i) s->L((i) & ~1) 1616 #define FMOVSHDUP(i) s->L((i) | 1) 1617 #define FMOVDLDUP(i) s->Q((i) & ~1) 1618 1619 #define SSE_HELPER_F(name, elem, num, F) \ 1620 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \ 1621 { \ 1622 int n = num; \ 1623 for (int i = n; --i >= 0; ) { \ 1624 d->elem(i) = F(i); \ 1625 } \ 1626 } 1627 1628 #if SHIFT > 0 1629 SSE_HELPER_F(helper_pmovsxbw, W, 4 << SHIFT, (int8_t) s->B) 1630 SSE_HELPER_F(helper_pmovsxbd, L, 2 << SHIFT, (int8_t) s->B) 1631 SSE_HELPER_F(helper_pmovsxbq, Q, 1 << SHIFT, (int8_t) s->B) 1632 SSE_HELPER_F(helper_pmovsxwd, L, 2 << SHIFT, (int16_t) s->W) 1633 SSE_HELPER_F(helper_pmovsxwq, Q, 1 << SHIFT, (int16_t) s->W) 1634 SSE_HELPER_F(helper_pmovsxdq, Q, 1 << SHIFT, (int32_t) s->L) 1635 SSE_HELPER_F(helper_pmovzxbw, W, 4 << SHIFT, s->B) 1636 SSE_HELPER_F(helper_pmovzxbd, L, 2 << SHIFT, s->B) 1637 SSE_HELPER_F(helper_pmovzxbq, Q, 1 << SHIFT, s->B) 1638 SSE_HELPER_F(helper_pmovzxwd, L, 2 << SHIFT, s->W) 1639 SSE_HELPER_F(helper_pmovzxwq, Q, 1 << SHIFT, s->W) 1640 SSE_HELPER_F(helper_pmovzxdq, Q, 1 << SHIFT, s->L) 1641 SSE_HELPER_F(helper_pmovsldup, L, 2 << SHIFT, FMOVSLDUP) 1642 SSE_HELPER_F(helper_pmovshdup, L, 2 << SHIFT, FMOVSHDUP) 1643 SSE_HELPER_F(helper_pmovdldup, Q, 1 << SHIFT, FMOVDLDUP) 1644 #endif 1645 1646 void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 1647 { 1648 int i; 1649 1650 for (i = 0; i < 1 << SHIFT; i++) { 1651 d->Q(i) = (int64_t)(int32_t) v->L(2 * i) * (int32_t) s->L(2 * i); 1652 } 1653 } 1654 1655 void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 1656 { 1657 uint16_t r[8]; 1658 int i, j, k; 1659 1660 for (i = 0, j = 0; i <= 2 << SHIFT; i += 8, j += 4) { 1661 r[0] = satuw(v->L(j)); 1662 r[1] = satuw(v->L(j + 1)); 1663 r[2] = satuw(v->L(j + 2)); 1664 r[3] = satuw(v->L(j + 3)); 1665 r[4] = satuw(s->L(j)); 1666 r[5] = satuw(s->L(j + 1)); 1667 r[6] = satuw(s->L(j + 2)); 1668 r[7] = satuw(s->L(j + 3)); 1669 for (k = 0; k < 8; k++) { 1670 d->W(i + k) = r[k]; 1671 } 1672 } 1673 } 1674 1675 #if SHIFT == 1 1676 void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 1677 { 1678 int idx = 0; 1679 1680 if (s->W(1) < s->W(idx)) { 1681 idx = 1; 1682 } 1683 if (s->W(2) < s->W(idx)) { 1684 idx = 2; 1685 } 1686 if (s->W(3) < s->W(idx)) { 1687 idx = 3; 1688 } 1689 if (s->W(4) < s->W(idx)) { 1690 idx = 4; 1691 } 1692 if (s->W(5) < s->W(idx)) { 1693 idx = 5; 1694 } 1695 if (s->W(6) < s->W(idx)) { 1696 idx = 6; 1697 } 1698 if (s->W(7) < s->W(idx)) { 1699 idx = 7; 1700 } 1701 1702 d->W(0) = s->W(idx); 1703 d->W(1) = idx; 1704 d->L(1) = 0; 1705 d->Q(1) = 0; 1706 } 1707 #endif 1708 1709 void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 1710 uint32_t mode) 1711 { 1712 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 1713 signed char prev_rounding_mode; 1714 int i; 1715 1716 prev_rounding_mode = env->sse_status.float_rounding_mode; 1717 if (!(mode & (1 << 2))) { 1718 set_x86_rounding_mode(mode & 3, &env->sse_status); 1719 } 1720 1721 for (i = 0; i < 2 << SHIFT; i++) { 1722 d->ZMM_S(i) = float32_round_to_int(s->ZMM_S(i), &env->sse_status); 1723 } 1724 1725 if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { 1726 set_float_exception_flags(get_float_exception_flags(&env->sse_status) & 1727 ~float_flag_inexact, 1728 &env->sse_status); 1729 } 1730 env->sse_status.float_rounding_mode = prev_rounding_mode; 1731 } 1732 1733 void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 1734 uint32_t mode) 1735 { 1736 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 1737 signed char prev_rounding_mode; 1738 int i; 1739 1740 prev_rounding_mode = env->sse_status.float_rounding_mode; 1741 if (!(mode & (1 << 2))) { 1742 set_x86_rounding_mode(mode & 3, &env->sse_status); 1743 } 1744 1745 for (i = 0; i < 1 << SHIFT; i++) { 1746 d->ZMM_D(i) = float64_round_to_int(s->ZMM_D(i), &env->sse_status); 1747 } 1748 1749 if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { 1750 set_float_exception_flags(get_float_exception_flags(&env->sse_status) & 1751 ~float_flag_inexact, 1752 &env->sse_status); 1753 } 1754 env->sse_status.float_rounding_mode = prev_rounding_mode; 1755 } 1756 1757 #if SHIFT == 1 1758 void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 1759 uint32_t mode) 1760 { 1761 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 1762 signed char prev_rounding_mode; 1763 int i; 1764 1765 prev_rounding_mode = env->sse_status.float_rounding_mode; 1766 if (!(mode & (1 << 2))) { 1767 set_x86_rounding_mode(mode & 3, &env->sse_status); 1768 } 1769 1770 d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status); 1771 for (i = 1; i < 2 << SHIFT; i++) { 1772 d->ZMM_L(i) = v->ZMM_L(i); 1773 } 1774 1775 if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { 1776 set_float_exception_flags(get_float_exception_flags(&env->sse_status) & 1777 ~float_flag_inexact, 1778 &env->sse_status); 1779 } 1780 env->sse_status.float_rounding_mode = prev_rounding_mode; 1781 } 1782 1783 void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 1784 uint32_t mode) 1785 { 1786 uint8_t old_flags = get_float_exception_flags(&env->sse_status); 1787 signed char prev_rounding_mode; 1788 int i; 1789 1790 prev_rounding_mode = env->sse_status.float_rounding_mode; 1791 if (!(mode & (1 << 2))) { 1792 set_x86_rounding_mode(mode & 3, &env->sse_status); 1793 } 1794 1795 d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status); 1796 for (i = 1; i < 1 << SHIFT; i++) { 1797 d->ZMM_Q(i) = v->ZMM_Q(i); 1798 } 1799 1800 if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) { 1801 set_float_exception_flags(get_float_exception_flags(&env->sse_status) & 1802 ~float_flag_inexact, 1803 &env->sse_status); 1804 } 1805 env->sse_status.float_rounding_mode = prev_rounding_mode; 1806 } 1807 #endif 1808 1809 #define FBLENDP(v, s, m) (m ? s : v) 1810 SSE_HELPER_I(helper_blendps, L, 2 << SHIFT, FBLENDP) 1811 SSE_HELPER_I(helper_blendpd, Q, 1 << SHIFT, FBLENDP) 1812 SSE_HELPER_I(helper_pblendw, W, 4 << SHIFT, FBLENDP) 1813 1814 void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 1815 uint32_t mask) 1816 { 1817 float32 prod1, prod2, temp2, temp3, temp4; 1818 int i; 1819 1820 for (i = 0; i < 2 << SHIFT; i += 4) { 1821 /* 1822 * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D 1823 * to correctly round the intermediate results 1824 */ 1825 if (mask & (1 << 4)) { 1826 prod1 = float32_mul(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status); 1827 } else { 1828 prod1 = float32_zero; 1829 } 1830 if (mask & (1 << 5)) { 1831 prod2 = float32_mul(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status); 1832 } else { 1833 prod2 = float32_zero; 1834 } 1835 temp2 = float32_add(prod1, prod2, &env->sse_status); 1836 if (mask & (1 << 6)) { 1837 prod1 = float32_mul(v->ZMM_S(i+2), s->ZMM_S(i+2), &env->sse_status); 1838 } else { 1839 prod1 = float32_zero; 1840 } 1841 if (mask & (1 << 7)) { 1842 prod2 = float32_mul(v->ZMM_S(i+3), s->ZMM_S(i+3), &env->sse_status); 1843 } else { 1844 prod2 = float32_zero; 1845 } 1846 temp3 = float32_add(prod1, prod2, &env->sse_status); 1847 temp4 = float32_add(temp2, temp3, &env->sse_status); 1848 1849 d->ZMM_S(i) = (mask & (1 << 0)) ? temp4 : float32_zero; 1850 d->ZMM_S(i+1) = (mask & (1 << 1)) ? temp4 : float32_zero; 1851 d->ZMM_S(i+2) = (mask & (1 << 2)) ? temp4 : float32_zero; 1852 d->ZMM_S(i+3) = (mask & (1 << 3)) ? temp4 : float32_zero; 1853 } 1854 } 1855 1856 #if SHIFT == 1 1857 /* Oddly, there is no ymm version of dppd */ 1858 void glue(helper_dppd, SUFFIX)(CPUX86State *env, 1859 Reg *d, Reg *v, Reg *s, uint32_t mask) 1860 { 1861 float64 prod1, prod2, temp2; 1862 1863 if (mask & (1 << 4)) { 1864 prod1 = float64_mul(v->ZMM_D(0), s->ZMM_D(0), &env->sse_status); 1865 } else { 1866 prod1 = float64_zero; 1867 } 1868 if (mask & (1 << 5)) { 1869 prod2 = float64_mul(v->ZMM_D(1), s->ZMM_D(1), &env->sse_status); 1870 } else { 1871 prod2 = float64_zero; 1872 } 1873 temp2 = float64_add(prod1, prod2, &env->sse_status); 1874 d->ZMM_D(0) = (mask & (1 << 0)) ? temp2 : float64_zero; 1875 d->ZMM_D(1) = (mask & (1 << 1)) ? temp2 : float64_zero; 1876 } 1877 #endif 1878 1879 void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 1880 uint32_t offset) 1881 { 1882 int i, j; 1883 uint16_t r[8]; 1884 1885 for (j = 0; j < 4 << SHIFT; ) { 1886 int s0 = (j * 2) + ((offset & 3) << 2); 1887 int d0 = (j * 2) + ((offset & 4) << 0); 1888 for (i = 0; i < LANE_WIDTH / 2; i++, d0++) { 1889 r[i] = 0; 1890 r[i] += abs1(v->B(d0 + 0) - s->B(s0 + 0)); 1891 r[i] += abs1(v->B(d0 + 1) - s->B(s0 + 1)); 1892 r[i] += abs1(v->B(d0 + 2) - s->B(s0 + 2)); 1893 r[i] += abs1(v->B(d0 + 3) - s->B(s0 + 3)); 1894 } 1895 for (i = 0; i < LANE_WIDTH / 2; i++, j++) { 1896 d->W(j) = r[i]; 1897 } 1898 offset >>= 3; 1899 } 1900 } 1901 1902 /* SSE4.2 op helpers */ 1903 #if SHIFT == 1 1904 static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl) 1905 { 1906 target_long val, limit; 1907 1908 /* Presence of REX.W is indicated by a bit higher than 7 set */ 1909 if (ctrl >> 8) { 1910 val = (target_long)env->regs[reg]; 1911 } else { 1912 val = (int32_t)env->regs[reg]; 1913 } 1914 if (ctrl & 1) { 1915 limit = 8; 1916 } else { 1917 limit = 16; 1918 } 1919 if ((val > limit) || (val < -limit)) { 1920 return limit; 1921 } 1922 return abs1(val); 1923 } 1924 1925 static inline int pcmp_ilen(Reg *r, uint8_t ctrl) 1926 { 1927 int val = 0; 1928 1929 if (ctrl & 1) { 1930 while (val < 8 && r->W(val)) { 1931 val++; 1932 } 1933 } else { 1934 while (val < 16 && r->B(val)) { 1935 val++; 1936 } 1937 } 1938 1939 return val; 1940 } 1941 1942 static inline int pcmp_val(Reg *r, uint8_t ctrl, int i) 1943 { 1944 switch ((ctrl >> 0) & 3) { 1945 case 0: 1946 return r->B(i); 1947 case 1: 1948 return r->W(i); 1949 case 2: 1950 return (int8_t)r->B(i); 1951 case 3: 1952 default: 1953 return (int16_t)r->W(i); 1954 } 1955 } 1956 1957 static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s, 1958 uint8_t ctrl, int valids, int validd) 1959 { 1960 unsigned int res = 0; 1961 int v; 1962 int j, i; 1963 int upper = (ctrl & 1) ? 7 : 15; 1964 1965 valids--; 1966 validd--; 1967 1968 CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0); 1969 1970 switch ((ctrl >> 2) & 3) { 1971 case 0: 1972 for (j = valids; j >= 0; j--) { 1973 res <<= 1; 1974 v = pcmp_val(s, ctrl, j); 1975 for (i = validd; i >= 0; i--) { 1976 res |= (v == pcmp_val(d, ctrl, i)); 1977 } 1978 } 1979 break; 1980 case 1: 1981 for (j = valids; j >= 0; j--) { 1982 res <<= 1; 1983 v = pcmp_val(s, ctrl, j); 1984 for (i = ((validd - 1) | 1); i >= 0; i -= 2) { 1985 res |= (pcmp_val(d, ctrl, i - 0) >= v && 1986 pcmp_val(d, ctrl, i - 1) <= v); 1987 } 1988 } 1989 break; 1990 case 2: 1991 res = (1 << (upper - MAX(valids, validd))) - 1; 1992 res <<= MAX(valids, validd) - MIN(valids, validd); 1993 for (i = MIN(valids, validd); i >= 0; i--) { 1994 res <<= 1; 1995 v = pcmp_val(s, ctrl, i); 1996 res |= (v == pcmp_val(d, ctrl, i)); 1997 } 1998 break; 1999 case 3: 2000 if (validd == -1) { 2001 res = (2 << upper) - 1; 2002 break; 2003 } 2004 for (j = valids == upper ? valids : valids - validd; j >= 0; j--) { 2005 res <<= 1; 2006 v = 1; 2007 for (i = MIN(valids - j, validd); i >= 0; i--) { 2008 v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i)); 2009 } 2010 res |= v; 2011 } 2012 break; 2013 } 2014 2015 switch ((ctrl >> 4) & 3) { 2016 case 1: 2017 res ^= (2 << upper) - 1; 2018 break; 2019 case 3: 2020 res ^= (1 << (valids + 1)) - 1; 2021 break; 2022 } 2023 2024 if (res) { 2025 CC_SRC |= CC_C; 2026 } 2027 if (res & 1) { 2028 CC_SRC |= CC_O; 2029 } 2030 2031 return res; 2032 } 2033 2034 void glue(helper_pcmpestri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 2035 uint32_t ctrl) 2036 { 2037 unsigned int res = pcmpxstrx(env, d, s, ctrl, 2038 pcmp_elen(env, R_EDX, ctrl), 2039 pcmp_elen(env, R_EAX, ctrl)); 2040 2041 if (res) { 2042 env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res); 2043 } else { 2044 env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); 2045 } 2046 } 2047 2048 void glue(helper_pcmpestrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 2049 uint32_t ctrl) 2050 { 2051 int i; 2052 unsigned int res = pcmpxstrx(env, d, s, ctrl, 2053 pcmp_elen(env, R_EDX, ctrl), 2054 pcmp_elen(env, R_EAX, ctrl)); 2055 2056 if ((ctrl >> 6) & 1) { 2057 if (ctrl & 1) { 2058 for (i = 0; i < 8; i++, res >>= 1) { 2059 env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0; 2060 } 2061 } else { 2062 for (i = 0; i < 16; i++, res >>= 1) { 2063 env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0; 2064 } 2065 } 2066 } else { 2067 env->xmm_regs[0].Q(1) = 0; 2068 env->xmm_regs[0].Q(0) = res; 2069 } 2070 } 2071 2072 void glue(helper_pcmpistri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 2073 uint32_t ctrl) 2074 { 2075 unsigned int res = pcmpxstrx(env, d, s, ctrl, 2076 pcmp_ilen(s, ctrl), 2077 pcmp_ilen(d, ctrl)); 2078 2079 if (res) { 2080 env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res); 2081 } else { 2082 env->regs[R_ECX] = 16 >> (ctrl & (1 << 0)); 2083 } 2084 } 2085 2086 void glue(helper_pcmpistrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 2087 uint32_t ctrl) 2088 { 2089 int i; 2090 unsigned int res = pcmpxstrx(env, d, s, ctrl, 2091 pcmp_ilen(s, ctrl), 2092 pcmp_ilen(d, ctrl)); 2093 2094 if ((ctrl >> 6) & 1) { 2095 if (ctrl & 1) { 2096 for (i = 0; i < 8; i++, res >>= 1) { 2097 env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0; 2098 } 2099 } else { 2100 for (i = 0; i < 16; i++, res >>= 1) { 2101 env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0; 2102 } 2103 } 2104 } else { 2105 env->xmm_regs[0].Q(1) = 0; 2106 env->xmm_regs[0].Q(0) = res; 2107 } 2108 } 2109 2110 #define CRCPOLY 0x1edc6f41 2111 #define CRCPOLY_BITREV 0x82f63b78 2112 target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len) 2113 { 2114 target_ulong crc = (msg & ((target_ulong) -1 >> 2115 (TARGET_LONG_BITS - len))) ^ crc1; 2116 2117 while (len--) { 2118 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0); 2119 } 2120 2121 return crc; 2122 } 2123 2124 #endif 2125 2126 void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 2127 uint32_t ctrl) 2128 { 2129 int a_idx = (ctrl & 1) != 0; 2130 int b_idx = (ctrl & 16) != 0; 2131 2132 for (int i = 0; i < SHIFT; i++) { 2133 uint64_t a = v->Q(2 * i + a_idx); 2134 uint64_t b = s->Q(2 * i + b_idx); 2135 Int128 *r = (Int128 *)&d->ZMM_X(i); 2136 2137 *r = clmul_64(a, b); 2138 } 2139 } 2140 2141 void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2142 { 2143 for (int i = 0; i < SHIFT; i++) { 2144 AESState *ad = (AESState *)&d->ZMM_X(i); 2145 AESState *st = (AESState *)&v->ZMM_X(i); 2146 AESState *rk = (AESState *)&s->ZMM_X(i); 2147 2148 aesdec_ISB_ISR_IMC_AK(ad, st, rk, false); 2149 } 2150 } 2151 2152 void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2153 { 2154 for (int i = 0; i < SHIFT; i++) { 2155 AESState *ad = (AESState *)&d->ZMM_X(i); 2156 AESState *st = (AESState *)&v->ZMM_X(i); 2157 AESState *rk = (AESState *)&s->ZMM_X(i); 2158 2159 aesdec_ISB_ISR_AK(ad, st, rk, false); 2160 } 2161 } 2162 2163 void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2164 { 2165 for (int i = 0; i < SHIFT; i++) { 2166 AESState *ad = (AESState *)&d->ZMM_X(i); 2167 AESState *st = (AESState *)&v->ZMM_X(i); 2168 AESState *rk = (AESState *)&s->ZMM_X(i); 2169 2170 aesenc_SB_SR_MC_AK(ad, st, rk, false); 2171 } 2172 } 2173 2174 void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2175 { 2176 for (int i = 0; i < SHIFT; i++) { 2177 AESState *ad = (AESState *)&d->ZMM_X(i); 2178 AESState *st = (AESState *)&v->ZMM_X(i); 2179 AESState *rk = (AESState *)&s->ZMM_X(i); 2180 2181 aesenc_SB_SR_AK(ad, st, rk, false); 2182 } 2183 } 2184 2185 #if SHIFT == 1 2186 void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 2187 { 2188 AESState *ad = (AESState *)&d->ZMM_X(0); 2189 AESState *st = (AESState *)&s->ZMM_X(0); 2190 2191 aesdec_IMC(ad, st, false); 2192 } 2193 2194 void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 2195 uint32_t ctrl) 2196 { 2197 int i; 2198 Reg tmp = *s; 2199 2200 for (i = 0 ; i < 4 ; i++) { 2201 d->B(i) = AES_sbox[tmp.B(i + 4)]; 2202 d->B(i + 8) = AES_sbox[tmp.B(i + 12)]; 2203 } 2204 d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl; 2205 d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl; 2206 } 2207 #endif 2208 #endif 2209 2210 #if SHIFT >= 1 2211 void glue(helper_vpermilpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2212 { 2213 uint64_t r0, r1; 2214 int i; 2215 2216 for (i = 0; i < 1 << SHIFT; i += 2) { 2217 r0 = v->Q(i + ((s->Q(i) >> 1) & 1)); 2218 r1 = v->Q(i + ((s->Q(i+1) >> 1) & 1)); 2219 d->Q(i) = r0; 2220 d->Q(i+1) = r1; 2221 } 2222 } 2223 2224 void glue(helper_vpermilps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2225 { 2226 uint32_t r0, r1, r2, r3; 2227 int i; 2228 2229 for (i = 0; i < 2 << SHIFT; i += 4) { 2230 r0 = v->L(i + (s->L(i) & 3)); 2231 r1 = v->L(i + (s->L(i+1) & 3)); 2232 r2 = v->L(i + (s->L(i+2) & 3)); 2233 r3 = v->L(i + (s->L(i+3) & 3)); 2234 d->L(i) = r0; 2235 d->L(i+1) = r1; 2236 d->L(i+2) = r2; 2237 d->L(i+3) = r3; 2238 } 2239 } 2240 2241 void glue(helper_vpermilpd_imm, SUFFIX)(Reg *d, Reg *s, uint32_t order) 2242 { 2243 uint64_t r0, r1; 2244 int i; 2245 2246 for (i = 0; i < 1 << SHIFT; i += 2) { 2247 r0 = s->Q(i + ((order >> 0) & 1)); 2248 r1 = s->Q(i + ((order >> 1) & 1)); 2249 d->Q(i) = r0; 2250 d->Q(i+1) = r1; 2251 2252 order >>= 2; 2253 } 2254 } 2255 2256 void glue(helper_vpermilps_imm, SUFFIX)(Reg *d, Reg *s, uint32_t order) 2257 { 2258 uint32_t r0, r1, r2, r3; 2259 int i; 2260 2261 for (i = 0; i < 2 << SHIFT; i += 4) { 2262 r0 = s->L(i + ((order >> 0) & 3)); 2263 r1 = s->L(i + ((order >> 2) & 3)); 2264 r2 = s->L(i + ((order >> 4) & 3)); 2265 r3 = s->L(i + ((order >> 6) & 3)); 2266 d->L(i) = r0; 2267 d->L(i+1) = r1; 2268 d->L(i+2) = r2; 2269 d->L(i+3) = r3; 2270 } 2271 } 2272 2273 #if SHIFT == 1 2274 #define FPSRLVD(x, c) (c < 32 ? ((x) >> c) : 0) 2275 #define FPSRLVQ(x, c) (c < 64 ? ((x) >> c) : 0) 2276 #define FPSRAVD(x, c) ((int32_t)(x) >> (c < 32 ? c : 31)) 2277 #define FPSRAVQ(x, c) ((int64_t)(x) >> (c < 64 ? c : 63)) 2278 #define FPSLLVD(x, c) (c < 32 ? ((x) << c) : 0) 2279 #define FPSLLVQ(x, c) (c < 64 ? ((x) << c) : 0) 2280 #endif 2281 2282 SSE_HELPER_L(helper_vpsrlvd, FPSRLVD) 2283 SSE_HELPER_L(helper_vpsravd, FPSRAVD) 2284 SSE_HELPER_L(helper_vpsllvd, FPSLLVD) 2285 2286 SSE_HELPER_Q(helper_vpsrlvq, FPSRLVQ) 2287 SSE_HELPER_Q(helper_vpsravq, FPSRAVQ) 2288 SSE_HELPER_Q(helper_vpsllvq, FPSLLVQ) 2289 2290 void glue(helper_vtestps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 2291 { 2292 uint32_t zf = 0, cf = 0; 2293 int i; 2294 2295 for (i = 0; i < 2 << SHIFT; i++) { 2296 zf |= (s->L(i) & d->L(i)); 2297 cf |= (s->L(i) & ~d->L(i)); 2298 } 2299 CC_SRC = ((zf >> 31) ? 0 : CC_Z) | ((cf >> 31) ? 0 : CC_C); 2300 } 2301 2302 void glue(helper_vtestpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 2303 { 2304 uint64_t zf = 0, cf = 0; 2305 int i; 2306 2307 for (i = 0; i < 1 << SHIFT; i++) { 2308 zf |= (s->Q(i) & d->Q(i)); 2309 cf |= (s->Q(i) & ~d->Q(i)); 2310 } 2311 CC_SRC = ((zf >> 63) ? 0 : CC_Z) | ((cf >> 63) ? 0 : CC_C); 2312 } 2313 2314 void glue(helper_vpmaskmovd_st, SUFFIX)(CPUX86State *env, 2315 Reg *v, Reg *s, target_ulong a0) 2316 { 2317 int i; 2318 2319 for (i = 0; i < (2 << SHIFT); i++) { 2320 if (v->L(i) >> 31) { 2321 cpu_stl_data_ra(env, a0 + i * 4, s->L(i), GETPC()); 2322 } 2323 } 2324 } 2325 2326 void glue(helper_vpmaskmovq_st, SUFFIX)(CPUX86State *env, 2327 Reg *v, Reg *s, target_ulong a0) 2328 { 2329 int i; 2330 2331 for (i = 0; i < (1 << SHIFT); i++) { 2332 if (v->Q(i) >> 63) { 2333 cpu_stq_data_ra(env, a0 + i * 8, s->Q(i), GETPC()); 2334 } 2335 } 2336 } 2337 2338 void glue(helper_vpmaskmovd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2339 { 2340 int i; 2341 2342 for (i = 0; i < (2 << SHIFT); i++) { 2343 d->L(i) = (v->L(i) >> 31) ? s->L(i) : 0; 2344 } 2345 } 2346 2347 void glue(helper_vpmaskmovq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2348 { 2349 int i; 2350 2351 for (i = 0; i < (1 << SHIFT); i++) { 2352 d->Q(i) = (v->Q(i) >> 63) ? s->Q(i) : 0; 2353 } 2354 } 2355 2356 void glue(helper_vpgatherdd, SUFFIX)(CPUX86State *env, 2357 Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) 2358 { 2359 int i; 2360 for (i = 0; i < (2 << SHIFT); i++) { 2361 if (v->L(i) >> 31) { 2362 target_ulong addr = a0 2363 + ((target_ulong)(int32_t)s->L(i) << scale); 2364 d->L(i) = cpu_ldl_data_ra(env, addr, GETPC()); 2365 } 2366 v->L(i) = 0; 2367 } 2368 } 2369 2370 void glue(helper_vpgatherdq, SUFFIX)(CPUX86State *env, 2371 Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) 2372 { 2373 int i; 2374 for (i = 0; i < (1 << SHIFT); i++) { 2375 if (v->Q(i) >> 63) { 2376 target_ulong addr = a0 2377 + ((target_ulong)(int32_t)s->L(i) << scale); 2378 d->Q(i) = cpu_ldq_data_ra(env, addr, GETPC()); 2379 } 2380 v->Q(i) = 0; 2381 } 2382 } 2383 2384 void glue(helper_vpgatherqd, SUFFIX)(CPUX86State *env, 2385 Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) 2386 { 2387 int i; 2388 for (i = 0; i < (1 << SHIFT); i++) { 2389 if (v->L(i) >> 31) { 2390 target_ulong addr = a0 2391 + ((target_ulong)(int64_t)s->Q(i) << scale); 2392 d->L(i) = cpu_ldl_data_ra(env, addr, GETPC()); 2393 } 2394 v->L(i) = 0; 2395 } 2396 for (i /= 2; i < 1 << SHIFT; i++) { 2397 d->Q(i) = 0; 2398 v->Q(i) = 0; 2399 } 2400 } 2401 2402 void glue(helper_vpgatherqq, SUFFIX)(CPUX86State *env, 2403 Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale) 2404 { 2405 int i; 2406 for (i = 0; i < (1 << SHIFT); i++) { 2407 if (v->Q(i) >> 63) { 2408 target_ulong addr = a0 2409 + ((target_ulong)(int64_t)s->Q(i) << scale); 2410 d->Q(i) = cpu_ldq_data_ra(env, addr, GETPC()); 2411 } 2412 v->Q(i) = 0; 2413 } 2414 } 2415 #endif 2416 2417 #if SHIFT >= 2 2418 void helper_vpermdq_ymm(Reg *d, Reg *v, Reg *s, uint32_t order) 2419 { 2420 uint64_t r0, r1, r2, r3; 2421 2422 switch (order & 3) { 2423 case 0: 2424 r0 = v->Q(0); 2425 r1 = v->Q(1); 2426 break; 2427 case 1: 2428 r0 = v->Q(2); 2429 r1 = v->Q(3); 2430 break; 2431 case 2: 2432 r0 = s->Q(0); 2433 r1 = s->Q(1); 2434 break; 2435 case 3: 2436 r0 = s->Q(2); 2437 r1 = s->Q(3); 2438 break; 2439 default: /* default case added to help the compiler to avoid warnings */ 2440 g_assert_not_reached(); 2441 } 2442 switch ((order >> 4) & 3) { 2443 case 0: 2444 r2 = v->Q(0); 2445 r3 = v->Q(1); 2446 break; 2447 case 1: 2448 r2 = v->Q(2); 2449 r3 = v->Q(3); 2450 break; 2451 case 2: 2452 r2 = s->Q(0); 2453 r3 = s->Q(1); 2454 break; 2455 case 3: 2456 r2 = s->Q(2); 2457 r3 = s->Q(3); 2458 break; 2459 default: /* default case added to help the compiler to avoid warnings */ 2460 g_assert_not_reached(); 2461 } 2462 d->Q(0) = r0; 2463 d->Q(1) = r1; 2464 d->Q(2) = r2; 2465 d->Q(3) = r3; 2466 if (order & 0x8) { 2467 d->Q(0) = 0; 2468 d->Q(1) = 0; 2469 } 2470 if (order & 0x80) { 2471 d->Q(2) = 0; 2472 d->Q(3) = 0; 2473 } 2474 } 2475 2476 void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order) 2477 { 2478 uint64_t r0, r1, r2, r3; 2479 r0 = s->Q(order & 3); 2480 r1 = s->Q((order >> 2) & 3); 2481 r2 = s->Q((order >> 4) & 3); 2482 r3 = s->Q((order >> 6) & 3); 2483 d->Q(0) = r0; 2484 d->Q(1) = r1; 2485 d->Q(2) = r2; 2486 d->Q(3) = r3; 2487 } 2488 2489 void helper_vpermd_ymm(Reg *d, Reg *v, Reg *s) 2490 { 2491 uint32_t r[8]; 2492 int i; 2493 2494 for (i = 0; i < 8; i++) { 2495 r[i] = s->L(v->L(i) & 7); 2496 } 2497 for (i = 0; i < 8; i++) { 2498 d->L(i) = r[i]; 2499 } 2500 } 2501 #endif 2502 2503 /* FMA3 op helpers */ 2504 #if SHIFT == 1 2505 #define SSE_HELPER_FMAS(name, elem, F) \ 2506 void name(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, int flags) \ 2507 { \ 2508 d->elem(0) = F(a->elem(0), b->elem(0), c->elem(0), flags, &env->sse_status); \ 2509 } 2510 #define SSE_HELPER_FMAP(name, elem, num, F) \ 2511 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, \ 2512 int flags, int flip) \ 2513 { \ 2514 int i; \ 2515 for (i = 0; i < num; i++) { \ 2516 d->elem(i) = F(a->elem(i), b->elem(i), c->elem(i), flags, &env->sse_status); \ 2517 flags ^= flip; \ 2518 } \ 2519 } 2520 2521 SSE_HELPER_FMAS(helper_fma4ss, ZMM_S, float32_muladd) 2522 SSE_HELPER_FMAS(helper_fma4sd, ZMM_D, float64_muladd) 2523 #endif 2524 2525 #if SHIFT >= 1 2526 SSE_HELPER_FMAP(helper_fma4ps, ZMM_S, 2 << SHIFT, float32_muladd) 2527 SSE_HELPER_FMAP(helper_fma4pd, ZMM_D, 1 << SHIFT, float64_muladd) 2528 #endif 2529 2530 #undef SSE_HELPER_S 2531 2532 #undef LANE_WIDTH 2533 #undef SHIFT 2534 #undef XMM_ONLY 2535 #undef Reg 2536 #undef B 2537 #undef W 2538 #undef L 2539 #undef Q 2540 #undef SUFFIX 2541