/*
 * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
 *
 * Copyright (c) 2005 Fabrice Bellard
 * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "crypto/aes.h"
#include "crypto/aes-round.h"
#include "crypto/clmul.h"

/*
 * This file is a template that is included once per register width:
 * SHIFT == 0 expands the helpers for the 64-bit MMX registers, SHIFT == 1
 * for 128-bit XMM and SHIFT == 2 for 256-bit YMM.  Reg, the element
 * accessors B/W/L/Q (byte/word/dword/qword) and the helper name SUFFIX
 * are redefined accordingly for each expansion.
 */
#if SHIFT == 0
#define Reg MMXReg
#define XMM_ONLY(...)
#define B(n) MMX_B(n)
#define W(n) MMX_W(n)
#define L(n) MMX_L(n)
#define Q(n) MMX_Q(n)
#define SUFFIX _mmx
#else
#define Reg ZMMReg
#define XMM_ONLY(...) __VA_ARGS__
#define B(n) ZMM_B(n)
#define W(n) ZMM_W(n)
#define L(n) ZMM_L(n)
#define Q(n) ZMM_Q(n)
#if SHIFT == 1
#define SUFFIX _xmm
#else
#define SUFFIX _ymm
#endif
#endif

/* Bytes per vector lane: 8 for MMX, 16 for each 128-bit lane of XMM/YMM. */
#define LANE_WIDTH (SHIFT ? 16 : 8)
#define PACK_WIDTH (LANE_WIDTH / 2)

#if SHIFT == 0
/*
 * Per-element shift primitives; "shift" refers to a local variable in the
 * helpers below.  Defined only on the first (SHIFT == 0) expansion so they
 * are not redefined when the file is included again; the macros remain
 * visible to the later expansions.
 */
#define FPSRL(x, c) ((x) >> shift)
#define FPSRAW(x, c) ((int16_t)(x) >> shift)
#define FPSRAL(x, c) ((int32_t)(x) >> shift)
#define FPSLL(x, c) ((x) << shift)
#endif

/* PSRLW: logical right shift of each word; a count above 15 clears dst. */
void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 15) {
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = 0;
        }
    } else {
        shift = c->B(0);
        for (int i = 0; i < 4 << SHIFT; i++) {
            d->W(i) = FPSRL(s->W(i), shift);
        }
    }
}

/* PSLLW: left shift of each word; a count above 15 clears dst. */
void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 15) {
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = 0;
        }
    } else {
        shift = c->B(0);
        for (int i = 0; i < 4 << SHIFT; i++) {
            d->W(i) = FPSLL(s->W(i), shift);
        }
    }
}

/*
 * PSRAW: arithmetic right shift of each word; counts above 15 are clamped
 * to 15, which replicates the sign bit across the element.
 */
void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 15) {
        shift = 15;
    } else {
        shift = c->B(0);
    }
    for (int i = 0; i < 4 << SHIFT; i++) {
        d->W(i) = FPSRAW(s->W(i), shift);
    }
}

/* PSRLD: logical right shift of each dword; a count above 31 clears dst. */
void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 31) {
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = 0;
        }
    } else {
        shift = c->B(0);
        for (int i = 0; i < 2 << SHIFT; i++) {
            d->L(i) = FPSRL(s->L(i), shift);
        }
    }
}

/* PSLLD: left shift of each dword; a count above 31 clears dst. */
void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 31) {
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = 0;
        }
    } else {
        shift = c->B(0);
        for (int i = 0; i < 2 << SHIFT; i++) {
            d->L(i) = FPSLL(s->L(i), shift);
        }
    }
}

/* PSRAD: arithmetic right shift of each dword; counts clamp at 31. */
void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 31) {
        shift = 31;
    } else {
        shift = c->B(0);
    }
    for (int i = 0; i < 2 << SHIFT; i++) {
        d->L(i) = FPSRAL(s->L(i), shift);
    }
}

/* PSRLQ: logical right shift of each qword; a count above 63 clears dst. */
void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 63) {
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = 0;
        }
    } else {
        shift = c->B(0);
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = FPSRL(s->Q(i), shift);
        }
    }
}

/* PSLLQ: left shift of each qword; a count above 63 clears dst. */
void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift;
    if (c->Q(0) > 63) {
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = 0;
        }
    } else {
        shift = c->B(0);
        for (int i = 0; i < 1 << SHIFT; i++) {
            d->Q(i) = FPSLL(s->Q(i), shift);
        }
    }
}

#if SHIFT >= 1
/*
 * PSRLDQ: byte-granular right shift applied independently to each 128-bit
 * lane; the byte count is capped at 16 (which clears the lane).
 */
void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift, i, j;

    shift = c->L(0);
    if (shift > 16) {
        shift = 16;
    }
    for (j = 0; j < 8 << SHIFT; j += LANE_WIDTH) {
        for (i = 0; i < 16 - shift; i++) {
            d->B(j + i) = s->B(j + i + shift);
        }
        for (i = 16 - shift; i < 16; i++) {
            d->B(j + i) = 0;
        }
    }
}

/*
 * PSLLDQ: byte-granular left shift per 128-bit lane; the high bytes are
 * copied first (downward loop) so an in-place d == s operation is safe.
 */
void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, Reg *c)
{
    int shift, i, j;

    shift = c->L(0);
    if (shift > 16) {
        shift = 16;
    }
    for (j = 0; j < 8 << SHIFT; j += LANE_WIDTH) {
        for (i = 15; i >= shift; i--) {
            d->B(j + i) = s->B(j + i - shift);
        }
        for (i = 0; i < shift; i++) {
            d->B(j + i) = 0;
        }
    }
}
#endif

/* Expand a unary element-wise helper: d[i] = F(s[i]) for num elements. */
#define SSE_HELPER_1(name, elem, num, F)                                \
    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)           \
    {                                                                   \
        int n = num;                                                    \
        for (int i = 0; i < n; i++) {                                   \
            d->elem(i) = F(s->elem(i));                                 \
        }                                                               \
    }

/* Expand a binary element-wise helper: d[i] = F(v[i], s[i]). */
#define SSE_HELPER_2(name, elem, num, F)                                \
    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)   \
    {                                                                   \
        int n = num;                                                    \
        for (int i = 0; i < n; i++) {                                   \
            d->elem(i) = F(v->elem(i), s->elem(i));                     \
        }                                                               \
    }
/* Convenience wrappers around SSE_HELPER_2 for each element width. */
#define SSE_HELPER_B(name, F)                                           \
    SSE_HELPER_2(name, B, 8 << SHIFT, F)

#define SSE_HELPER_W(name, F)                                           \
    SSE_HELPER_2(name, W, 4 << SHIFT, F)

#define SSE_HELPER_L(name, F)                                           \
    SSE_HELPER_2(name, L, 2 << SHIFT, F)

#define SSE_HELPER_Q(name, F)                                           \
    SSE_HELPER_2(name, Q, 1 << SHIFT, F)

#if SHIFT == 0
/* Saturating conversions shared by all expansions (defined only once). */

/* Clamp to unsigned byte range [0, 255]. */
static inline int satub(int x)
{
    if (x < 0) {
        return 0;
    } else if (x > 255) {
        return 255;
    } else {
        return x;
    }
}

/* Clamp to unsigned word range [0, 65535]. */
static inline int satuw(int x)
{
    if (x < 0) {
        return 0;
    } else if (x > 65535) {
        return 65535;
    } else {
        return x;
    }
}

/* Clamp to signed byte range [-128, 127]. */
static inline int satsb(int x)
{
    if (x < -128) {
        return -128;
    } else if (x > 127) {
        return 127;
    } else {
        return x;
    }
}

/* Clamp to signed word range [-32768, 32767]. */
static inline int satsw(int x)
{
    if (x < -32768) {
        return -32768;
    } else if (x > 32767) {
        return 32767;
    } else {
        return x;
    }
}

/* Element operations plugged into the SSE_HELPER_* expanders. */
#define FADD(a, b) ((a) + (b))
#define FADDUB(a, b) satub((a) + (b))
#define FADDUW(a, b) satuw((a) + (b))
#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))

#define FSUB(a, b) ((a) - (b))
#define FSUBUB(a, b) satub((a) - (b))
#define FSUBUW(a, b) satuw((a) - (b))
#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
#define FMINUB(a, b) ((a) < (b)) ? (a) : (b)
#define FMINSW(a, b) ((int16_t)(a) < (int16_t)(b)) ? (a) : (b)
#define FMAXUB(a, b) ((a) > (b)) ? (a) : (b)
#define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b)

/* 3DNow! PMULHRW rounds the 32-bit product before taking the high half. */
#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
#define FMULHUW(a, b) ((a) * (b) >> 16)
#define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)

/* Rounded average, as used by PAVGB/PAVGW. */
#define FAVG(a, b) (((a) + (b) + 1) >> 1)
#endif

SSE_HELPER_W(helper_pmulhuw, FMULHUW)
SSE_HELPER_W(helper_pmulhw, FMULHW)

#if SHIFT == 0
/* 3DNow! PMULHRW: rounded signed high-half multiply, MMX only. */
void glue(helper_pmulhrw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    d->W(0) = FMULHRW(d->W(0), s->W(0));
    d->W(1) = FMULHRW(d->W(1), s->W(1));
    d->W(2) = FMULHRW(d->W(2), s->W(2));
    d->W(3) = FMULHRW(d->W(3), s->W(3));
}
#endif

SSE_HELPER_B(helper_pavgb, FAVG)
SSE_HELPER_W(helper_pavgw, FAVG)

/* PMULUDQ: unsigned 32x32 -> 64 multiply of the even dwords. */
void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;

    for (i = 0; i < (1 << SHIFT); i++) {
        d->Q(i) = (uint64_t)s->L(i * 2) * (uint64_t)v->L(i * 2);
    }
}

/* PMADDWD: signed word multiply, adjacent products summed into dwords. */
void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;

    for (i = 0; i < (2 << SHIFT); i++) {
        d->L(i) = (int16_t)s->W(2 * i) * (int16_t)v->W(2 * i) +
            (int16_t)s->W(2 * i + 1) * (int16_t)v->W(2 * i + 1);
    }
}

#if SHIFT == 0
static inline int abs1(int a)
{
    if (a < 0) {
        return -a;
    } else {
        return a;
    }
}
#endif

/* PSADBW: sum of absolute byte differences per 64-bit group. */
void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;

    for (i = 0; i < (1 << SHIFT); i++) {
        unsigned int val = 0;
        val += abs1(v->B(8 * i + 0) - s->B(8 * i + 0));
        val += abs1(v->B(8 * i + 1) - s->B(8 * i + 1));
        val += abs1(v->B(8 * i + 2) - s->B(8 * i + 2));
        val += abs1(v->B(8 * i + 3) - s->B(8 * i + 3));
        val += abs1(v->B(8 * i + 4) - s->B(8 * i + 4));
        val += abs1(v->B(8 * i + 5) - s->B(8 * i + 5));
        val += abs1(v->B(8 * i + 6) - s->B(8 * i + 6));
        val += abs1(v->B(8 * i + 7) - s->B(8 * i + 7));
        d->Q(i) = val;
    }
}

#if SHIFT < 2
/*
 * MASKMOVQ/MASKMOVDQU: store each byte of d to memory at a0+i when the
 * top bit of the corresponding mask byte in s is set.
 */
void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                  target_ulong a0)
{
    int i;

    for (i = 0; i < (8 << SHIFT); i++) {
        if (s->B(i) & 0x80) {
            cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC());
        }
    }
}
#endif

/*
 * Select four elements of a/b at "offset" according to the 2-bit fields of
 * "order"; temporaries r0..r3 are provided by the caller so d may alias
 * a or b.
 */
#define SHUFFLE4(F, a, b, offset) do {      \
    r0 = a->F((order & 3) + offset);        \
    r1 = a->F(((order >> 2) & 3) + offset); \
    r2 = b->F(((order >> 4) & 3) + offset); \
    r3 = b->F(((order >> 6) & 3) + offset); \
    d->F(offset) = r0;                      \
    d->F(offset + 1) = r1;                  \
    d->F(offset + 2) = r2;                  \
    d->F(offset + 3) = r3;                  \
    } while (0)

#if SHIFT == 0
/* PSHUFW (MMX): permute the four words of s by the immediate. */
void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order)
{
    uint16_t r0, r1, r2, r3;

    SHUFFLE4(W, s, s, 0);
}
#else
/* SHUFPS: low two dwords from v, high two from s, per 128-bit lane. */
void glue(helper_shufps, SUFFIX)(Reg *d, Reg *v, Reg *s, int order)
{
    uint32_t r0, r1, r2, r3;
    int i;

    for (i = 0; i < 2 << SHIFT; i += 4) {
        SHUFFLE4(L, v, s, i);
    }
}

/* SHUFPD: one qword from v and one from s per lane, 1 select bit each. */
void glue(helper_shufpd, SUFFIX)(Reg *d, Reg *v, Reg *s, int order)
{
    uint64_t r0, r1;
    int i;

    for (i = 0; i < 1 << SHIFT; i += 2) {
        r0 = v->Q(((order & 1) & 1) + i);
        r1 = s->Q(((order >> 1) & 1) + i);
        d->Q(i) = r0;
        d->Q(i + 1) = r1;
        order >>= 2;
    }
}

/* PSHUFD: permute dwords within each 128-bit lane by the immediate. */
void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order)
{
    uint32_t r0, r1, r2, r3;
    int i;

    for (i = 0; i < 2 << SHIFT; i += 4) {
        SHUFFLE4(L, s, s, i);
    }
}

/* PSHUFLW: shuffle the low four words per lane; high qword copied. */
void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order)
{
    uint16_t r0, r1, r2, r3;
    int i, j;

    for (i = 0, j = 1; j < 1 << SHIFT; i += 8, j += 2) {
        SHUFFLE4(W, s, s, i);
        d->Q(j) = s->Q(j);
    }
}

/* PSHUFHW: shuffle the high four words per lane; low qword copied. */
void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order)
{
    uint16_t r0, r1, r2, r3;
    int i, j;

    for (i = 4, j = 0; j < 1 << SHIFT; i += 8, j += 2) {
        d->Q(j) = s->Q(j);
        SHUFFLE4(W, s, s, i);
    }
}
#endif

#if SHIFT >= 1
/* FPU ops */
/* XXX: not accurate */

/*
 * Expand packed single/double helpers: d[i] = F(width, v[i], s[i]) over
 * all elements, using the softfloat status in env->sse_status.
 */
#define SSE_HELPER_P(name, F)                                           \
    void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env,          \
            Reg *d, Reg *v, Reg *s)                                     \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < 2 << SHIFT; i++) {                              \
            d->ZMM_S(i) = F(32, v->ZMM_S(i), s->ZMM_S(i));              \
        }                                                               \
    }                                                                   \
                                                                        \
    void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env,          \
            Reg *d, Reg *v, Reg *s)                                     \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < 1 << SHIFT; i++) {                              \
            d->ZMM_D(i) = F(64, v->ZMM_D(i), s->ZMM_D(i));              \
        }                                                               \
    }

#if SHIFT == 1

/*
 * Scalar (ss/sd) variants exist only on the XMM expansion: element 0 is
 * computed, the remaining elements are copied from v.
 */
#define SSE_HELPER_S(name, F)                                           \
    SSE_HELPER_P(name, F)                                               \
                                                                        \
    void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s)\
    {                                                                   \
        int i;                                                          \
        d->ZMM_S(0) = F(32, v->ZMM_S(0), s->ZMM_S(0));                  \
        for (i = 1; i < 2 << SHIFT; i++) {                              \
            d->ZMM_L(i) = v->ZMM_L(i);                                  \
        }                                                               \
    }                                                                   \
                                                                        \
    void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s)\
    {                                                                   \
        int i;                                                          \
        d->ZMM_D(0) = F(64, v->ZMM_D(0), s->ZMM_D(0));                  \
        for (i = 1; i < 1 << SHIFT; i++) {                              \
            d->ZMM_Q(i) = v->ZMM_Q(i);                                  \
        }                                                               \
    }

#else

#define SSE_HELPER_S(name, F) SSE_HELPER_P(name, F)

#endif

#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)

/* Note that the choice of comparison op here is important to get the
 * special cases right: for min and max Intel specifies that (-0,0),
 * (NaN, anything) and (anything, NaN) return the second argument.
 */
#define FPU_MIN(size, a, b)                                     \
    (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b))
#define FPU_MAX(size, a, b)                                     \
    (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b))

SSE_HELPER_S(add, FPU_ADD)
SSE_HELPER_S(sub, FPU_SUB)
SSE_HELPER_S(mul, FPU_MUL)
SSE_HELPER_S(div, FPU_DIV)
SSE_HELPER_S(min, FPU_MIN)
SSE_HELPER_S(max, FPU_MAX)

/* SQRTPS: packed single-precision square root. */
void glue(helper_sqrtps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int i;
    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_S(i) = float32_sqrt(s->ZMM_S(i), &env->sse_status);
    }
}

/* SQRTPD: packed double-precision square root. */
void glue(helper_sqrtpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int i;
    for (i = 0; i < 1 << SHIFT; i++) {
        d->ZMM_D(i) = float64_sqrt(s->ZMM_D(i), &env->sse_status);
    }
}

#if SHIFT == 1
/* SQRTSS: scalar sqrt of s element 0; upper elements copied from v. */
void helper_sqrtss(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
    d->ZMM_S(0) = float32_sqrt(s->ZMM_S(0), &env->sse_status);
    for (i = 1; i < 2 << SHIFT; i++) {
        d->ZMM_L(i) = v->ZMM_L(i);
    }
}

/* SQRTSD: scalar double sqrt; upper element copied from v. */
void helper_sqrtsd(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
    d->ZMM_D(0) = float64_sqrt(s->ZMM_D(0), &env->sse_status);
    for (i = 1; i < 1 << SHIFT; i++) {
        d->ZMM_Q(i) = v->ZMM_Q(i);
    }
}
#endif

/* float to float conversions */

/*
 * CVTPS2PD: widen; iterates downwards because each double overwrites two
 * source floats, so d may alias s.
 */
void glue(helper_cvtps2pd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int i;
    for (i = 1 << SHIFT; --i >= 0; ) {
        d->ZMM_D(i) = float32_to_float64(s->ZMM_S(i), &env->sse_status);
    }
}

/* CVTPD2PS: narrow; the now-unused upper half of d is zeroed. */
void glue(helper_cvtpd2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int i;
    for (i = 0; i < 1 << SHIFT; i++) {
        d->ZMM_S(i) = float64_to_float32(s->ZMM_D(i), &env->sse_status);
    }
    for (i >>= 1; i < 1 << SHIFT; i++) {
        d->Q(i) = 0;
    }
}

#if SHIFT >= 1
/* VCVTPH2PS: half to single; downward iteration allows d == s. */
void glue(helper_cvtph2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int i;

    for (i = 2 << SHIFT; --i >= 0; ) {
        d->ZMM_S(i) = float16_to_float32(s->ZMM_H(i), true, &env->sse_status);
    }
}

/*
 * VCVTPS2PH: single to half.  Bit 2 of the immediate selects MXCSR
 * rounding; otherwise bits 0-1 give the rounding mode, restored at exit.
 */
void glue(helper_cvtps2ph, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, int mode)
{
    int i;
    FloatRoundMode prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2))) {
        set_x86_rounding_mode(mode & 3, &env->sse_status);
    }

    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_H(i) = float32_to_float16(s->ZMM_S(i), true, &env->sse_status);
    }
    for (i >>= 2; i < 1 << SHIFT; i++) {
        d->Q(i) = 0;
    }

    env->sse_status.float_rounding_mode = prev_rounding_mode;
}
#endif

#if SHIFT == 1
/* CVTSS2SD: scalar widen; upper elements copied from v. */
void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
    d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status);
    for (i = 1; i < 1 << SHIFT; i++) {
        d->ZMM_Q(i) = v->ZMM_Q(i);
    }
}

/* CVTSD2SS: scalar narrow; upper elements copied from v. */
void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
    d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
    for (i = 1; i < 2 << SHIFT; i++) {
        d->ZMM_L(i) = v->ZMM_L(i);
    }
}
#endif

/* integer to float */
void glue(helper_cvtdq2ps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int i;
    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_S(i) = int32_to_float32(s->ZMM_L(i), &env->sse_status);
    }
}

/* CVTDQ2PD: widening, so iterate downwards to permit d == s. */
void glue(helper_cvtdq2pd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int i;
    for (i = 1 << SHIFT; --i >= 0; ) {
        int32_t l = s->ZMM_L(i);
        d->ZMM_D(i) = int32_to_float64(l, &env->sse_status);
    }
}

#if SHIFT == 1
/* CVTPI2PS: two MMX dwords to the low two singles of an XMM register. */
void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s)
{
    d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
    d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
}

/* CVTPI2PD: two MMX dwords to two doubles. */
void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s)
{
    d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
    d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
}

/* CVTSI2SS (32-bit source). */
void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val)
{
    d->ZMM_S(0) = int32_to_float32(val, &env->sse_status);
}
/* CVTSI2SD (32-bit source). */
void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val)
{
    d->ZMM_D(0) = int32_to_float64(val, &env->sse_status);
}

#ifdef TARGET_X86_64
/* CVTSI2SS with a 64-bit source operand. */
void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val)
{
    d->ZMM_S(0) = int64_to_float32(val, &env->sse_status);
}

/* CVTSI2SD with a 64-bit source operand. */
void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val)
{
    d->ZMM_D(0) = int64_to_float64(val, &env->sse_status);
}
#endif

#endif

/* float to integer */

#if SHIFT == 1
/*
 * x86 mandates that we return the indefinite integer value for the result
 * of any float-to-integer conversion that raises the 'invalid' exception.
 * Wrap the softfloat functions to get this behaviour.
 */
#define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE)              \
    static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s)        \
    {                                                                   \
        int oldflags, newflags;                                         \
        RETTYPE r;                                                      \
                                                                        \
        oldflags = get_float_exception_flags(s);                        \
        set_float_exception_flags(0, s);                                \
        r = FN(a, s);                                                   \
        newflags = get_float_exception_flags(s);                        \
        if (newflags & float_flag_invalid) {                            \
            r = INDEFVALUE;                                             \
        }                                                               \
        set_float_exception_flags(newflags | oldflags, s);              \
        return r;                                                       \
    }

WRAP_FLOATCONV(int32_t, float32_to_int32, float32, INT32_MIN)
WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero, float32, INT32_MIN)
WRAP_FLOATCONV(int32_t, float64_to_int32, float64, INT32_MIN)
WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero, float64, INT32_MIN)
WRAP_FLOATCONV(int64_t, float32_to_int64, float32, INT64_MIN)
WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero, float32, INT64_MIN)
WRAP_FLOATCONV(int64_t, float64_to_int64, float64, INT64_MIN)
WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero, float64, INT64_MIN)
#endif

/* CVTPS2DQ: packed single to int32 with current rounding mode. */
void glue(helper_cvtps2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    int i;
    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_L(i) = x86_float32_to_int32(s->ZMM_S(i), &env->sse_status);
    }
}

/* CVTPD2DQ: packed double to int32; unused upper half of d is zeroed. */
void glue(helper_cvtpd2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    int i;
    for (i = 0; i < 1 << SHIFT; i++) {
        d->ZMM_L(i) = x86_float64_to_int32(s->ZMM_D(i), &env->sse_status);
    }
    for (i >>= 1; i < 1 << SHIFT; i++) {
        d->Q(i) = 0;
    }
}

#if SHIFT == 1
/* CVTPS2PI: low two singles to an MMX register. */
void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
    d->MMX_L(0) = x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
    d->MMX_L(1) = x86_float32_to_int32(s->ZMM_S(1), &env->sse_status);
}

/* CVTPD2PI: two doubles to an MMX register. */
void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
    d->MMX_L(0) = x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
    d->MMX_L(1) = x86_float64_to_int32(s->ZMM_D(1), &env->sse_status);
}

int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s)
{
    return x86_float32_to_int32(s->ZMM_S(0), &env->sse_status);
}

int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s)
{
    return x86_float64_to_int32(s->ZMM_D(0), &env->sse_status);
}

#ifdef TARGET_X86_64
int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s)
{
    return x86_float32_to_int64(s->ZMM_S(0), &env->sse_status);
}

int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s)
{
    return x86_float64_to_int64(s->ZMM_D(0), &env->sse_status);
}
#endif
#endif

/* float to integer truncated */
void glue(helper_cvttps2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    int i;
    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_L(i) = x86_float32_to_int32_round_to_zero(s->ZMM_S(i),
                                                         &env->sse_status);
    }
}

/* CVTTPD2DQ: truncating double to int32; upper half of d zeroed. */
void glue(helper_cvttpd2dq, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    int i;
    for (i = 0; i < 1 << SHIFT; i++) {
        d->ZMM_L(i) = x86_float64_to_int32_round_to_zero(s->ZMM_D(i),
                                                         &env->sse_status);
    }
    for (i >>= 1; i < 1 << SHIFT; i++) {
        d->Q(i) = 0;
    }
}

#if SHIFT == 1
void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
    d->MMX_L(0) = x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
    d->MMX_L(1) = x86_float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
}

void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
{
    d->MMX_L(0) = x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
    d->MMX_L(1) = x86_float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
}

int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s)
{
    return x86_float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
}

int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s)
{
    return x86_float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
}

#ifdef TARGET_X86_64
int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s)
{
    return x86_float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status);
}

int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
{
    return x86_float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status);
}
#endif
#endif

/*
 * RSQRTPS: approximate 1/sqrt(x).  The exception flags are saved and
 * restored because the instruction does not report FP exceptions.
 */
void glue(helper_rsqrtps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    int i;
    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_S(i) = float32_div(float32_one,
                                  float32_sqrt(s->ZMM_S(i), &env->sse_status),
                                  &env->sse_status);
    }
    set_float_exception_flags(old_flags, &env->sse_status);
}

#if SHIFT == 1
void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    int i;
    d->ZMM_S(0) = float32_div(float32_one,
                              float32_sqrt(s->ZMM_S(0), &env->sse_status),
                              &env->sse_status);
    set_float_exception_flags(old_flags, &env->sse_status);
    for (i = 1; i < 2 << SHIFT; i++) {
        d->ZMM_L(i) = v->ZMM_L(i);
    }
}
#endif

/* RCPPS: approximate reciprocal; flags restored as for RSQRTPS. */
void glue(helper_rcpps, SUFFIX)(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    int i;
    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_S(i) = float32_div(float32_one, s->ZMM_S(i), &env->sse_status);
    }
    set_float_exception_flags(old_flags, &env->sse_status);
}

#if SHIFT == 1
void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *v, ZMMReg *s)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    int i;
    d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
    for (i = 1; i < 2 << SHIFT; i++) {
        d->ZMM_L(i) = v->ZMM_L(i);
    }
    set_float_exception_flags(old_flags, &env->sse_status);
}
#endif

#if SHIFT == 1
/* SSE4a EXTRQ: extract "len" bits (0 meaning 64) starting at "shift". */
static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
{
    uint64_t mask;

    if (len == 0) {
        mask = ~0LL;
    } else {
        mask = (1ULL << len) - 1;
    }
    return (src >> shift) & mask;
}

/* EXTRQ register form: bit index in s byte 1, length in s byte 0. */
void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1) & 63, s->ZMM_B(0) & 63);
}

void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length)
{
    d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length);
}

/* SSE4a INSERTQ: insert "len" bits of src into dest at "shift". */
static inline uint64_t helper_insertq(uint64_t dest, uint64_t src, int shift, int len)
{
    uint64_t mask;

    if (len == 0) {
        mask = ~0ULL;
    } else {
        mask = (1ULL << len) - 1;
    }
    return (dest & ~(mask << shift)) | ((src & mask) << shift);
}

/* INSERTQ register form: bit index in s byte 9, length in s byte 8. */
void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
{
    d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), s->ZMM_B(9) & 63, s->ZMM_B(8) & 63);
}

void helper_insertq_i(CPUX86State *env, ZMMReg *d, ZMMReg *s, int index, int length)
{
    d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), s->ZMM_Q(0), index, length);
}
#endif

/*
 * Horizontal single-precision ops (HADDPS/HSUBPS): pairwise combine within
 * each 128-bit lane, v supplying the low results and s the high ones; the
 * temporary array allows d to alias v or s.
 */
#define SSE_HELPER_HPS(name, F)  \
void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
{                                                                            \
    float32 r[2 << SHIFT];                                                   \
    int i, j, k;                                                             \
    for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) {                       \
        for (i = j = 0; j < 4; i++, j += 2) {                                \
            r[i + k] = F(v->ZMM_S(j + k), v->ZMM_S(j + k + 1), &env->sse_status); \
        }                                                                    \
        for (j = 0; j < 4; i++, j += 2) {                                    \
            r[i + k] = F(s->ZMM_S(j + k), s->ZMM_S(j + k + 1), &env->sse_status); \
        }                                                                    \
    }                                                                        \
    for (i = 0; i < 2 << SHIFT; i++) {                                       \
        d->ZMM_S(i) = r[i];                                                  \
    }                                                                        \
}

SSE_HELPER_HPS(haddps, float32_add)
SSE_HELPER_HPS(hsubps, float32_sub)

/* Horizontal double-precision ops (HADDPD/HSUBPD), per 128-bit lane. */
#define SSE_HELPER_HPD(name, F)  \
void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
{                                                                            \
    float64 r[1 << SHIFT];                                                   \
    int i, j, k;                                                             \
    for (k = 0; k < 1 << SHIFT; k += LANE_WIDTH / 8) {                       \
        for (i = j = 0; j < 2; i++, j += 2) {                                \
            r[i + k] = F(v->ZMM_D(j + k), v->ZMM_D(j + k + 1), &env->sse_status); \
        }                                                                    \
        for (j = 0; j < 2; i++, j += 2) {                                    \
            r[i + k] = F(s->ZMM_D(j + k), s->ZMM_D(j + k + 1), &env->sse_status); \
        }                                                                    \
    }                                                                        \
    for (i = 0; i < 1 << SHIFT; i++) {                                       \
        d->ZMM_D(i) = r[i];                                                  \
    }                                                                        \
}

SSE_HELPER_HPD(haddpd, float64_add)
SSE_HELPER_HPD(hsubpd, float64_sub)

/* ADDSUBPS: subtract in even elements, add in odd elements. */
void glue(helper_addsubps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
    for (i = 0; i < 2 << SHIFT; i += 2) {
        d->ZMM_S(i) = float32_sub(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status);
        d->ZMM_S(i+1) = float32_add(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status);
    }
}

/* ADDSUBPD: subtract in even elements, add in odd elements. */
void glue(helper_addsubpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
    for (i = 0; i < 1 << SHIFT; i += 2) {
        d->ZMM_D(i) = float64_sub(v->ZMM_D(i), s->ZMM_D(i), &env->sse_status);
        d->ZMM_D(i+1) = float64_add(v->ZMM_D(i+1), s->ZMM_D(i+1), &env->sse_status);
    }
}

/*
 * Expand packed compare helpers: each element becomes all-ones when
 * C(F(...)) holds, else all-zeroes.  F selects quiet vs signaling compare.
 */
#define SSE_HELPER_CMP_P(name, F, C)                                    \
    void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env,          \
                                             Reg *d, Reg *v, Reg *s)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < 2 << SHIFT; i++) {                              \
            d->ZMM_L(i) = C(F(32, v->ZMM_S(i), s->ZMM_S(i))) ? -1 : 0;  \
        }                                                               \
    }                                                                   \
                                                                        \
    void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env,          \
                                             Reg *d, Reg *v, Reg *s)    \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < 1 << SHIFT; i++) {                              \
            d->ZMM_Q(i) = C(F(64, v->ZMM_D(i), s->ZMM_D(i))) ? -1 : 0;  \
        }                                                               \
    }

#if SHIFT == 1
/* On the XMM expansion also emit the scalar ss/sd compare forms. */
#define SSE_HELPER_CMP(name, F, C)                                          \
    SSE_HELPER_CMP_P(name, F, C)                                            \
    void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s)    \
    {                                                                       \
        int i;                                                              \
        d->ZMM_L(0) = C(F(32, v->ZMM_S(0), s->ZMM_S(0))) ? -1 : 0;          \
        for (i = 1; i < 2 << SHIFT; i++) {                                  \
            d->ZMM_L(i) = v->ZMM_L(i);                                      \
        }                                                                   \
    }                                                                       \
                                                                            \
    void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s)    \
    {                                                                       \
        int i;                                                              \
        d->ZMM_Q(0) = C(F(64, v->ZMM_D(0), s->ZMM_D(0))) ? -1 : 0;          \
        for (i = 1; i < 1 << SHIFT; i++) {                                  \
            d->ZMM_Q(i) = v->ZMM_Q(i);                                      \
        }                                                                   \
    }

/* "Unordered or equal" predicate (EQ_UQ style). */
static inline bool FPU_EQU(FloatRelation x)
{
    return (x == float_relation_equal || x == float_relation_unordered);
}
static inline bool FPU_GE(FloatRelation x)
{
    return (x == float_relation_equal || x == float_relation_greater);
}
#define FPU_EQ(x) (x == float_relation_equal)
#define FPU_LT(x) (x == float_relation_less)
#define FPU_LE(x) (x <= float_relation_equal)
#define FPU_GT(x) (x == float_relation_greater)
#define FPU_UNORD(x) (x == float_relation_unordered)
/* We must make sure we evaluate the argument in case it is a signalling NAN */
#define FPU_FALSE(x) (x == float_relation_equal && 0)

#define FPU_CMPQ(size, a, b) \
    float ## size ## _compare_quiet(a, b, &env->sse_status)
#define FPU_CMPS(size, a, b) \
    float ## size ## _compare(a, b, &env->sse_status)

#else
#define SSE_HELPER_CMP(name, F, C) SSE_HELPER_CMP_P(name, F, C)
#endif

/*
 * CMPccPS/PD/SS/SD predicates 0-7, then the AVX extensions: predicates
 * 8-0x0f, 0x10-0x17 (quiet/signaling swapped) and 0x18-0x1f follow below.
 */
SSE_HELPER_CMP(cmpeq, FPU_CMPQ, FPU_EQ)
SSE_HELPER_CMP(cmplt, FPU_CMPS, FPU_LT)
SSE_HELPER_CMP(cmple, FPU_CMPS, FPU_LE)
SSE_HELPER_CMP(cmpunord, FPU_CMPQ, FPU_UNORD)
SSE_HELPER_CMP(cmpneq, FPU_CMPQ, !FPU_EQ)
SSE_HELPER_CMP(cmpnlt, FPU_CMPS, !FPU_LT)
SSE_HELPER_CMP(cmpnle, FPU_CMPS, !FPU_LE)
SSE_HELPER_CMP(cmpord, FPU_CMPQ, !FPU_UNORD)

SSE_HELPER_CMP(cmpequ, FPU_CMPQ, FPU_EQU)
SSE_HELPER_CMP(cmpnge, FPU_CMPS, !FPU_GE)
SSE_HELPER_CMP(cmpngt, FPU_CMPS, !FPU_GT)
SSE_HELPER_CMP(cmpfalse, FPU_CMPQ, FPU_FALSE)
SSE_HELPER_CMP(cmpnequ, FPU_CMPQ, !FPU_EQU)
SSE_HELPER_CMP(cmpge, FPU_CMPS, FPU_GE)
SSE_HELPER_CMP(cmpgt, FPU_CMPS, FPU_GT)
SSE_HELPER_CMP(cmptrue, FPU_CMPQ, !FPU_FALSE)

SSE_HELPER_CMP(cmpeqs, FPU_CMPS, FPU_EQ)
SSE_HELPER_CMP(cmpltq, FPU_CMPQ, FPU_LT)
SSE_HELPER_CMP(cmpleq, FPU_CMPQ, FPU_LE)
SSE_HELPER_CMP(cmpunords, FPU_CMPS, FPU_UNORD) 1086 SSE_HELPER_CMP(cmpneqq, FPU_CMPS, !FPU_EQ) 1087 SSE_HELPER_CMP(cmpnltq, FPU_CMPQ, !FPU_LT) 1088 SSE_HELPER_CMP(cmpnleq, FPU_CMPQ, !FPU_LE) 1089 SSE_HELPER_CMP(cmpords, FPU_CMPS, !FPU_UNORD) 1090 1091 SSE_HELPER_CMP(cmpequs, FPU_CMPS, FPU_EQU) 1092 SSE_HELPER_CMP(cmpngeq, FPU_CMPQ, !FPU_GE) 1093 SSE_HELPER_CMP(cmpngtq, FPU_CMPQ, !FPU_GT) 1094 SSE_HELPER_CMP(cmpfalses, FPU_CMPS, FPU_FALSE) 1095 SSE_HELPER_CMP(cmpnequs, FPU_CMPS, !FPU_EQU) 1096 SSE_HELPER_CMP(cmpgeq, FPU_CMPQ, FPU_GE) 1097 SSE_HELPER_CMP(cmpgtq, FPU_CMPQ, FPU_GT) 1098 SSE_HELPER_CMP(cmptrues, FPU_CMPS, !FPU_FALSE) 1099 1100 #undef SSE_HELPER_CMP 1101 1102 #if SHIFT == 1 1103 static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C}; 1104 1105 void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s) 1106 { 1107 FloatRelation ret; 1108 float32 s0, s1; 1109 1110 s0 = d->ZMM_S(0); 1111 s1 = s->ZMM_S(0); 1112 ret = float32_compare_quiet(s0, s1, &env->sse_status); 1113 CC_SRC = comis_eflags[ret + 1]; 1114 CC_OP = CC_OP_EFLAGS; 1115 } 1116 1117 void helper_comiss(CPUX86State *env, Reg *d, Reg *s) 1118 { 1119 FloatRelation ret; 1120 float32 s0, s1; 1121 1122 s0 = d->ZMM_S(0); 1123 s1 = s->ZMM_S(0); 1124 ret = float32_compare(s0, s1, &env->sse_status); 1125 CC_SRC = comis_eflags[ret + 1]; 1126 CC_OP = CC_OP_EFLAGS; 1127 } 1128 1129 void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s) 1130 { 1131 FloatRelation ret; 1132 float64 d0, d1; 1133 1134 d0 = d->ZMM_D(0); 1135 d1 = s->ZMM_D(0); 1136 ret = float64_compare_quiet(d0, d1, &env->sse_status); 1137 CC_SRC = comis_eflags[ret + 1]; 1138 CC_OP = CC_OP_EFLAGS; 1139 } 1140 1141 void helper_comisd(CPUX86State *env, Reg *d, Reg *s) 1142 { 1143 FloatRelation ret; 1144 float64 d0, d1; 1145 1146 d0 = d->ZMM_D(0); 1147 d1 = s->ZMM_D(0); 1148 ret = float64_compare(d0, d1, &env->sse_status); 1149 CC_SRC = comis_eflags[ret + 1]; 1150 CC_OP = CC_OP_EFLAGS; 1151 } 1152 #endif 1153 1154 uint32_t 
glue(helper_movmskps, SUFFIX)(CPUX86State *env, Reg *s) 1155 { 1156 uint32_t mask; 1157 int i; 1158 1159 mask = 0; 1160 for (i = 0; i < 2 << SHIFT; i++) { 1161 mask |= (s->ZMM_L(i) >> (31 - i)) & (1 << i); 1162 } 1163 return mask; 1164 } 1165 1166 uint32_t glue(helper_movmskpd, SUFFIX)(CPUX86State *env, Reg *s) 1167 { 1168 uint32_t mask; 1169 int i; 1170 1171 mask = 0; 1172 for (i = 0; i < 1 << SHIFT; i++) { 1173 mask |= (s->ZMM_Q(i) >> (63 - i)) & (1 << i); 1174 } 1175 return mask; 1176 } 1177 1178 #endif 1179 1180 #define PACK_HELPER_B(name, F) \ 1181 void glue(helper_pack ## name, SUFFIX)(CPUX86State *env, \ 1182 Reg *d, Reg *v, Reg *s) \ 1183 { \ 1184 uint8_t r[PACK_WIDTH * 2]; \ 1185 int j, k; \ 1186 for (j = 0; j < 4 << SHIFT; j += PACK_WIDTH) { \ 1187 for (k = 0; k < PACK_WIDTH; k++) { \ 1188 r[k] = F((int16_t)v->W(j + k)); \ 1189 } \ 1190 for (k = 0; k < PACK_WIDTH; k++) { \ 1191 r[PACK_WIDTH + k] = F((int16_t)s->W(j + k)); \ 1192 } \ 1193 for (k = 0; k < PACK_WIDTH * 2; k++) { \ 1194 d->B(2 * j + k) = r[k]; \ 1195 } \ 1196 } \ 1197 } 1198 1199 PACK_HELPER_B(sswb, satsb) 1200 PACK_HELPER_B(uswb, satub) 1201 1202 void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 1203 { 1204 uint16_t r[PACK_WIDTH]; 1205 int j, k; 1206 1207 for (j = 0; j < 2 << SHIFT; j += PACK_WIDTH / 2) { 1208 for (k = 0; k < PACK_WIDTH / 2; k++) { 1209 r[k] = satsw(v->L(j + k)); 1210 } 1211 for (k = 0; k < PACK_WIDTH / 2; k++) { 1212 r[PACK_WIDTH / 2 + k] = satsw(s->L(j + k)); 1213 } 1214 for (k = 0; k < PACK_WIDTH; k++) { 1215 d->W(2 * j + k) = r[k]; 1216 } 1217 } 1218 } 1219 1220 #define UNPCK_OP(base_name, base) \ 1221 \ 1222 void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\ 1223 Reg *d, Reg *v, Reg *s) \ 1224 { \ 1225 uint8_t r[PACK_WIDTH * 2]; \ 1226 int j, i; \ 1227 \ 1228 for (j = 0; j < 8 << SHIFT; ) { \ 1229 int k = j + base * PACK_WIDTH; \ 1230 for (i = 0; i < PACK_WIDTH; i++) { \ 1231 r[2 * i] = v->B(k + i); \ 1232 r[2 * i + 1] = 
s->B(k + i); \ 1233 } \ 1234 for (i = 0; i < PACK_WIDTH * 2; i++, j++) { \ 1235 d->B(j) = r[i]; \ 1236 } \ 1237 } \ 1238 } \ 1239 \ 1240 void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\ 1241 Reg *d, Reg *v, Reg *s) \ 1242 { \ 1243 uint16_t r[PACK_WIDTH]; \ 1244 int j, i; \ 1245 \ 1246 for (j = 0; j < 4 << SHIFT; ) { \ 1247 int k = j + base * PACK_WIDTH / 2; \ 1248 for (i = 0; i < PACK_WIDTH / 2; i++) { \ 1249 r[2 * i] = v->W(k + i); \ 1250 r[2 * i + 1] = s->W(k + i); \ 1251 } \ 1252 for (i = 0; i < PACK_WIDTH; i++, j++) { \ 1253 d->W(j) = r[i]; \ 1254 } \ 1255 } \ 1256 } \ 1257 \ 1258 void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\ 1259 Reg *d, Reg *v, Reg *s) \ 1260 { \ 1261 uint32_t r[PACK_WIDTH / 2]; \ 1262 int j, i; \ 1263 \ 1264 for (j = 0; j < 2 << SHIFT; ) { \ 1265 int k = j + base * PACK_WIDTH / 4; \ 1266 for (i = 0; i < PACK_WIDTH / 4; i++) { \ 1267 r[2 * i] = v->L(k + i); \ 1268 r[2 * i + 1] = s->L(k + i); \ 1269 } \ 1270 for (i = 0; i < PACK_WIDTH / 2; i++, j++) { \ 1271 d->L(j) = r[i]; \ 1272 } \ 1273 } \ 1274 } \ 1275 \ 1276 XMM_ONLY( \ 1277 void glue(helper_punpck ## base_name ## qdq, SUFFIX)( \ 1278 CPUX86State *env, Reg *d, Reg *v, Reg *s) \ 1279 { \ 1280 uint64_t r[2]; \ 1281 int i; \ 1282 \ 1283 for (i = 0; i < 1 << SHIFT; i += 2) { \ 1284 r[0] = v->Q(base + i); \ 1285 r[1] = s->Q(base + i); \ 1286 d->Q(i) = r[0]; \ 1287 d->Q(i + 1) = r[1]; \ 1288 } \ 1289 } \ 1290 ) 1291 1292 UNPCK_OP(l, 0) 1293 UNPCK_OP(h, 1) 1294 1295 #undef PACK_WIDTH 1296 #undef PACK_HELPER_B 1297 #undef UNPCK_OP 1298 1299 1300 /* 3DNow! 
float ops */
#if SHIFT == 0
/* PI2FD: convert two packed int32 to packed float32. */
void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status);
    d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status);
}

/* PI2FW: convert int16 in the low half of each dword to float32. */
void helper_pi2fw(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status);
    d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status);
}

/* PF2ID: convert packed float32 to int32, truncating toward zero. */
void helper_pf2id(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status);
    d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status);
}

/* PF2IW: like PF2ID but the result is saturated to 16-bit range. */
void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0),
                                                       &env->mmx_status));
    d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1),
                                                       &env->mmx_status));
}

/* PFACC: horizontal add — d = { d0+d1, s0+s1 }. */
void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    float32 r;

    /* Compute d0+d1 first: writing d->MMX_S(1) would clobber an input. */
    r = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
    d->MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
    d->MMX_S(0) = r;
}

/* PFADD: packed float add. */
void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
}

/* PFCMPEQ: all-ones mask per element where d == s (quiet compare). */
void helper_pfcmpeq(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0),
                                   &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1),
                                   &env->mmx_status) ? -1 : 0;
}

/* PFCMPGE: d >= s, expressed as s <= d. */
void helper_pfcmpge(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0),
                             &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1),
                             &env->mmx_status) ? -1 : 0;
}

/* PFCMPGT: d > s, expressed as s < d. */
void helper_pfcmpgt(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0),
                             &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1),
                             &env->mmx_status) ? -1 : 0;
}

/* PFMAX: keep the larger element (d unchanged unless strictly smaller). */
void helper_pfmax(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) {
        d->MMX_S(0) = s->MMX_S(0);
    }
    if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) {
        d->MMX_S(1) = s->MMX_S(1);
    }
}

/* PFMIN: keep the smaller element. */
void helper_pfmin(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) {
        d->MMX_S(0) = s->MMX_S(0);
    }
    if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) {
        d->MMX_S(1) = s->MMX_S(1);
    }
}

/* PFMUL: packed float multiply. */
void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
}

/* PFNACC: horizontal subtract — d = { d0-d1, s0-s1 }. */
void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    float32 r;

    r = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
    d->MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
    d->MMX_S(0) = r;
}

/* PFPNACC: mixed horizontal op — d = { d0-d1, s0+s1 }. */
void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    float32 r;

    r = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
    d->MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
    d->MMX_S(0) = r;
}

/* PFRCP: reciprocal of the low element, broadcast to both. */
void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = d->MMX_S(0);
}

/* PFRSQRT: 1/sqrt(|s0|), with s0's sign re-applied, broadcast to both. */
void helper_pfrsqrt(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    /* Strip the sign bit so the sqrt operates on the magnitude. */
    d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff;
    d->MMX_S(1) = float32_div(float32_one,
                              float32_sqrt(d->MMX_S(1), &env->mmx_status),
                              &env->mmx_status);
    d->MMX_L(1) |= s->MMX_L(0) & 0x80000000;
    d->MMX_L(0) = d->MMX_L(1);
}

/* PFSUB: packed float subtract, d - s. */
void helper_pfsub(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
}

/* PFSUBR: reversed subtract, s - d. */
void helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status);
}

/* PSWAPD: swap the two dwords (r guards against d aliasing s). */
void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s)
{
    uint32_t r;

    r = s->MMX_L(0);
    d->MMX_L(0) = s->MMX_L(1);
    d->MMX_L(1) = r;
}
#endif

/* SSSE3 op helpers */
/*
 * PSHUFB: per-byte shuffle of v selected by the low bits of s; a set
 * high bit in the selector zeroes the result byte.  SSE/AVX selects
 * within the 16-byte lane containing the destination byte.
 */
void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
#if SHIFT == 0
    uint8_t r[8];

    for (i = 0; i < 8; i++) {
        r[i] = (s->B(i) & 0x80) ? 0 : (v->B(s->B(i) & 7));
    }
    for (i = 0; i < 8; i++) {
        d->B(i) = r[i];
    }
#else
    uint8_t r[8 << SHIFT];

    for (i = 0; i < 8 << SHIFT; i++) {
        int j = i & ~0xf;       /* base of the 16-byte lane */
        r[i] = (s->B(i) & 0x80) ?
0 : v->B(j | (s->B(i) & 0xf));
    }
    /* Copy out of r[] only after all selectors are read (d may alias). */
    for (i = 0; i < 8 << SHIFT; i++) {
        d->B(i) = r[i];
    }
#endif
}

/*
 * PHADDW/PHSUBW family: horizontal 16-bit op per 128-bit lane; the low
 * half of each result lane comes from pairs of v, the high half from
 * pairs of s.  Staged in r[] because d may alias v or s.
 */
#define SSE_HELPER_HW(name, F)  \
void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
{                                                          \
    uint16_t r[4 << SHIFT];                                \
    int i, j, k;                                           \
    for (k = 0; k < 4 << SHIFT; k += LANE_WIDTH / 2) {     \
        for (i = j = 0; j < LANE_WIDTH / 2; i++, j += 2) { \
            r[i + k] = F(v->W(j + k), v->W(j + k + 1));    \
        }                                                  \
        for (j = 0; j < LANE_WIDTH / 2; i++, j += 2) {     \
            r[i + k] = F(s->W(j + k), s->W(j + k + 1));    \
        }                                                  \
    }                                                      \
    for (i = 0; i < 4 << SHIFT; i++) {                     \
        d->W(i) = r[i];                                    \
    }                                                      \
}

/* Same scheme for 32-bit horizontal ops (PHADDD/PHSUBD). */
#define SSE_HELPER_HL(name, F)  \
void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
{                                                          \
    uint32_t r[2 << SHIFT];                                \
    int i, j, k;                                           \
    for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) {     \
        for (i = j = 0; j < LANE_WIDTH / 4; i++, j += 2) { \
            r[i + k] = F(v->L(j + k), v->L(j + k + 1));    \
        }                                                  \
        for (j = 0; j < LANE_WIDTH / 4; i++, j += 2) {     \
            r[i + k] = F(s->L(j + k), s->L(j + k + 1));    \
        }                                                  \
    }                                                      \
    for (i = 0; i < 2 << SHIFT; i++) {                     \
        d->L(i) = r[i];                                    \
    }                                                      \
}

SSE_HELPER_HW(phaddw, FADD)
SSE_HELPER_HW(phsubw, FSUB)
SSE_HELPER_HW(phaddsw, FADDSW)
SSE_HELPER_HW(phsubsw, FSUBSW)
SSE_HELPER_HL(phaddd, FADD)
SSE_HELPER_HL(phsubd, FSUB)

#undef SSE_HELPER_HW
#undef SSE_HELPER_HL

/*
 * PMADDUBSW: multiply unsigned bytes of v by signed bytes of s, add
 * horizontal pairs, and saturate to signed 16-bit.
 */
void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;
    for (i = 0; i < 4 << SHIFT; i++) {
        d->W(i) = satsw((int8_t)s->B(i * 2) * (uint8_t)v->B(i * 2) +
                        (int8_t)s->B(i * 2 + 1) * (uint8_t)v->B(i * 2 + 1));
    }
}

/* PMULHRSW: 16x16->32 multiply, round (+0x4000) and keep bits 30..15. */
#define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15)
SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)

/*
 * PSIGNB/W/L: copy d if s is positive, zero if s is zero, negate d if s
 * is negative (s > INT_MAX in unsigned terms means s is negative).
 */
#define FSIGNB(d, s) (s <= INT8_MAX  ? s ? d : 0 : -(int8_t)d)
#define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d)
#define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d)
SSE_HELPER_B(helper_psignb, FSIGNB)
SSE_HELPER_W(helper_psignw, FSIGNW)
SSE_HELPER_L(helper_psignd, FSIGNL)

/*
 * PALIGNR: concatenate v:s per 128-bit lane and extract a byte-shifted
 * window.  Implemented with 64-bit shifts; SHR() yields 0 for
 * out-of-range shift counts so the | terms select the right pieces.
 */
void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
                                  uint32_t imm)
{
    int i;

    /* XXX could be checked during translation */
    if (imm >= (SHIFT ? 32 : 16)) {
        for (i = 0; i < (1 << SHIFT); i++) {
            d->Q(i) = 0;
        }
    } else {
        int shift = imm * 8;
#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
#if SHIFT == 0
        d->Q(0) = SHR(s->Q(0), shift - 0) |
            SHR(v->Q(0), shift - 64);
#else
        for (i = 0; i < (1 << SHIFT); i += 2) {
            uint64_t r0, r1;

            r0 = SHR(s->Q(i), shift - 0) |
                SHR(s->Q(i + 1), shift - 64) |
                SHR(v->Q(i), shift - 128) |
                SHR(v->Q(i + 1), shift - 192);
            r1 = SHR(s->Q(i), shift + 64) |
                SHR(s->Q(i + 1), shift - 0) |
                SHR(v->Q(i), shift - 64) |
                SHR(v->Q(i + 1), shift - 128);
            d->Q(i) = r0;
            d->Q(i + 1) = r1;
        }
#endif
#undef SHR
    }
}

#if SHIFT >= 1

/* Element-wise select of v or s under a per-element mask register m. */
#define SSE_HELPER_V(name, elem, num, F)                                \
    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,   \
                            Reg *m)                                     \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < num; i++) {                                     \
            d->elem(i) = F(v->elem(i), s->elem(i), m->elem(i));         \
        }                                                               \
    }

/* Element-wise select of v or s under an 8-bit immediate mask. */
#define SSE_HELPER_I(name, elem, num, F)                                \
    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,   \
                            uint32_t imm)                               \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < num; i++) {                                     \
            int j = i & 7;      /* imm repeats every 8 elements */      \
            d->elem(i) = F(v->elem(i), s->elem(i), (imm >> j) & 1);     \
        }                                                               \
    }

/* SSE4.1 op helpers */
/* BLENDV*: take s where the mask element's top bit is set, else v. */
#define FBLENDVB(v, s, m) ((m & 0x80) ? s : v)
#define FBLENDVPS(v, s, m) ((m & 0x80000000) ?
s : v)
#define FBLENDVPD(v, s, m) ((m & 0x8000000000000000LL) ? s : v)
SSE_HELPER_V(helper_pblendvb, B, 8 << SHIFT, FBLENDVB)
SSE_HELPER_V(helper_blendvps, L, 2 << SHIFT, FBLENDVPS)
SSE_HELPER_V(helper_blendvpd, Q, 1 << SHIFT, FBLENDVPD)

/* PTEST: set ZF if s & d == 0, CF if s & ~d == 0; other flags cleared. */
void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    uint64_t zf = 0, cf = 0;
    int i;

    for (i = 0; i < 1 << SHIFT; i++) {
        zf |= (s->Q(i) & d->Q(i));
        cf |= (s->Q(i) & ~d->Q(i));
    }
    CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
    CC_OP = CC_OP_EFLAGS;
}

/* MOVSLDUP/MOVSHDUP/MOVDDUP element selectors. */
#define FMOVSLDUP(i) s->L((i) & ~1)
#define FMOVSHDUP(i) s->L((i) | 1)
#define FMOVDLDUP(i) s->Q((i) & ~1)

/*
 * Widening/duplicating moves.  Iterates downward so that widening
 * conversions are safe when d aliases s (the source element index is
 * always <= the destination index being written).
 */
#define SSE_HELPER_F(name, elem, num, F)                        \
    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)   \
    {                                                           \
        int n = num;                                            \
        for (int i = n; --i >= 0; ) {                           \
            d->elem(i) = F(i);                                  \
        }                                                       \
    }

#if SHIFT > 0
SSE_HELPER_F(helper_pmovsxbw, W, 4 << SHIFT, (int8_t) s->B)
SSE_HELPER_F(helper_pmovsxbd, L, 2 << SHIFT, (int8_t) s->B)
SSE_HELPER_F(helper_pmovsxbq, Q, 1 << SHIFT, (int8_t) s->B)
SSE_HELPER_F(helper_pmovsxwd, L, 2 << SHIFT, (int16_t) s->W)
SSE_HELPER_F(helper_pmovsxwq, Q, 1 << SHIFT, (int16_t) s->W)
SSE_HELPER_F(helper_pmovsxdq, Q, 1 << SHIFT, (int32_t) s->L)
SSE_HELPER_F(helper_pmovzxbw, W, 4 << SHIFT, s->B)
SSE_HELPER_F(helper_pmovzxbd, L, 2 << SHIFT, s->B)
SSE_HELPER_F(helper_pmovzxbq, Q, 1 << SHIFT, s->B)
SSE_HELPER_F(helper_pmovzxwd, L, 2 << SHIFT, s->W)
SSE_HELPER_F(helper_pmovzxwq, Q, 1 << SHIFT, s->W)
SSE_HELPER_F(helper_pmovzxdq, Q, 1 << SHIFT, s->L)
SSE_HELPER_F(helper_pmovsldup, L, 2 << SHIFT, FMOVSLDUP)
SSE_HELPER_F(helper_pmovshdup, L, 2 << SHIFT, FMOVSHDUP)
SSE_HELPER_F(helper_pmovdldup, Q, 1 << SHIFT, FMOVDLDUP)
#endif

/* PMULDQ: signed 32x32->64 multiply of the even dword of each qword. */
void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;

    for (i = 0; i < 1 << SHIFT; i++) {
        d->Q(i) = (int64_t)(int32_t) v->L(2 * i) * (int32_t) s->L(2 * i);
    }
}

/*
 * PACKUSDW: narrow signed 32-bit elements to 16-bit with unsigned
 * saturation, one 128-bit lane (8 results) per iteration; staged in
 * r[] since d may alias v or s.
 */
void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    uint16_t r[8];
    int i, j, k;

    for (i = 0, j = 0; i <= 2 << SHIFT; i += 8, j += 4) {
        r[0] = satuw(v->L(j));
        r[1] = satuw(v->L(j + 1));
        r[2] = satuw(v->L(j + 2));
        r[3] = satuw(v->L(j + 3));
        r[4] = satuw(s->L(j));
        r[5] = satuw(s->L(j + 1));
        r[6] = satuw(s->L(j + 2));
        r[7] = satuw(s->L(j + 3));
        for (k = 0; k < 8; k++) {
            d->W(i + k) = r[k];
        }
    }
}

#if SHIFT == 1
/*
 * PHMINPOSUW: find the minimum unsigned word and its index; ties go to
 * the lowest index.  Result word 0 = minimum, word 1 = index, rest zero.
 */
void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    int idx = 0;

    if (s->W(1) < s->W(idx)) {
        idx = 1;
    }
    if (s->W(2) < s->W(idx)) {
        idx = 2;
    }
    if (s->W(3) < s->W(idx)) {
        idx = 3;
    }
    if (s->W(4) < s->W(idx)) {
        idx = 4;
    }
    if (s->W(5) < s->W(idx)) {
        idx = 5;
    }
    if (s->W(6) < s->W(idx)) {
        idx = 6;
    }
    if (s->W(7) < s->W(idx)) {
        idx = 7;
    }

    d->W(0) = s->W(idx);
    d->W(1) = idx;
    d->L(1) = 0;
    d->Q(1) = 0;
}
#endif

/*
 * ROUNDPS: round packed floats to integral values.  mode bit 2 selects
 * MXCSR rounding vs. the immediate's bits 0-1; bit 3 suppresses the
 * inexact exception.
 */
void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                  uint32_t mode)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    signed char prev_rounding_mode;
    int i;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2))) {
        set_x86_rounding_mode(mode & 3, &env->sse_status);
    }

    for (i = 0; i < 2 << SHIFT; i++) {
        d->ZMM_S(i) = float32_round_to_int(s->ZMM_S(i), &env->sse_status);
    }

    /* Bit 3: drop inexact unless it was already pending before the op. */
    if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
                                  ~float_flag_inexact,
                                  &env->sse_status);
    }
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}

/* ROUNDPD: as roundps, for packed doubles. */
void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                  uint32_t mode)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    signed char prev_rounding_mode;
    int i;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2))) {
        set_x86_rounding_mode(mode & 3, &env->sse_status);
    }

    for (i = 0; i < 1 << SHIFT; i++) {
        d->ZMM_D(i) = float64_round_to_int(s->ZMM_D(i), &env->sse_status);
    }

    if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
                                  ~float_flag_inexact,
                                  &env->sse_status);
    }
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}

#if SHIFT == 1
/* ROUNDSS: round the low float of s; upper elements copied from v. */
void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
                                  uint32_t mode)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    signed char prev_rounding_mode;
    int i;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2))) {
        set_x86_rounding_mode(mode & 3, &env->sse_status);
    }

    d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
    for (i = 1; i < 2 << SHIFT; i++) {
        d->ZMM_L(i) = v->ZMM_L(i);
    }

    if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
                                  ~float_flag_inexact,
                                  &env->sse_status);
    }
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}

/* ROUNDSD: round the low double of s; upper element copied from v. */
void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
                                  uint32_t mode)
{
    uint8_t old_flags = get_float_exception_flags(&env->sse_status);
    signed char prev_rounding_mode;
    int i;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2))) {
        set_x86_rounding_mode(mode & 3, &env->sse_status);
    }

    d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
    for (i = 1; i < 1 << SHIFT; i++) {
        d->ZMM_Q(i) = v->ZMM_Q(i);
    }

    if (mode & (1 << 3) && !(old_flags & float_flag_inexact)) {
        set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
                                  ~float_flag_inexact,
                                  &env->sse_status);
    }
    env->sse_status.float_rounding_mode = prev_rounding_mode;
}
#endif

/* BLENDPS/BLENDPD/PBLENDW: immediate-mask element select. */
#define FBLENDP(v, s, m) (m ? s : v)
SSE_HELPER_I(helper_blendps, L, 2 << SHIFT, FBLENDP)
SSE_HELPER_I(helper_blendpd, Q, 1 << SHIFT, FBLENDP)
SSE_HELPER_I(helper_pblendw, W, 4 << SHIFT, FBLENDP)

/*
 * DPPS: dot product of the floats selected by mask bits 4-7, broadcast
 * to the result elements selected by mask bits 0-3, per 128-bit lane.
 */
void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
                               uint32_t mask)
{
    float32 prod1, prod2, temp2, temp3, temp4;
    int i;

    for (i = 0; i < 2 << SHIFT; i += 4) {
        /*
         * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
         * to correctly round the intermediate results
         */
        if (mask & (1 << 4)) {
            prod1 = float32_mul(v->ZMM_S(i), s->ZMM_S(i), &env->sse_status);
        } else {
            prod1 = float32_zero;
        }
        if (mask & (1 << 5)) {
            prod2 = float32_mul(v->ZMM_S(i+1), s->ZMM_S(i+1), &env->sse_status);
        } else {
            prod2 = float32_zero;
        }
        temp2 = float32_add(prod1, prod2, &env->sse_status);
        if (mask & (1 << 6)) {
            prod1 = float32_mul(v->ZMM_S(i+2), s->ZMM_S(i+2), &env->sse_status);
        } else {
            prod1 = float32_zero;
        }
        if (mask & (1 << 7)) {
            prod2 = float32_mul(v->ZMM_S(i+3), s->ZMM_S(i+3), &env->sse_status);
        } else {
            prod2 = float32_zero;
        }
        temp3 = float32_add(prod1, prod2, &env->sse_status);
        temp4 = float32_add(temp2, temp3, &env->sse_status);

        d->ZMM_S(i) = (mask & (1 << 0)) ? temp4 : float32_zero;
        d->ZMM_S(i+1) = (mask & (1 << 1)) ?
temp4 : float32_zero;
        d->ZMM_S(i+2) = (mask & (1 << 2)) ? temp4 : float32_zero;
        d->ZMM_S(i+3) = (mask & (1 << 3)) ? temp4 : float32_zero;
    }
}

#if SHIFT == 1
/* Oddly, there is no ymm version of dppd */
/* DPPD: two-element double dot product under mask bits 4-5 / 0-1. */
void glue(helper_dppd, SUFFIX)(CPUX86State *env,
                               Reg *d, Reg *v, Reg *s, uint32_t mask)
{
    float64 prod1, prod2, temp2;

    if (mask & (1 << 4)) {
        prod1 = float64_mul(v->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
    } else {
        prod1 = float64_zero;
    }
    if (mask & (1 << 5)) {
        prod2 = float64_mul(v->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
    } else {
        prod2 = float64_zero;
    }
    temp2 = float64_add(prod1, prod2, &env->sse_status);
    d->ZMM_D(0) = (mask & (1 << 0)) ? temp2 : float64_zero;
    d->ZMM_D(1) = (mask & (1 << 1)) ? temp2 : float64_zero;
}
#endif

/*
 * MPSADBW: sums of absolute differences of sliding 4-byte groups of v
 * against a fixed 4-byte group of s; the offsets of both groups come
 * from the immediate (3 bits consumed per 128-bit lane).
 */
void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s,
                                  uint32_t offset)
{
    int i, j;
    uint16_t r[8];

    for (j = 0; j < 4 << SHIFT; ) {
        int s0 = (j * 2) + ((offset & 3) << 2);
        int d0 = (j * 2) + ((offset & 4) << 0);
        for (i = 0; i < LANE_WIDTH / 2; i++, d0++) {
            r[i] = 0;
            r[i] += abs1(v->B(d0 + 0) - s->B(s0 + 0));
            r[i] += abs1(v->B(d0 + 1) - s->B(s0 + 1));
            r[i] += abs1(v->B(d0 + 2) - s->B(s0 + 2));
            r[i] += abs1(v->B(d0 + 3) - s->B(s0 + 3));
        }
        for (i = 0; i < LANE_WIDTH / 2; i++, j++) {
            d->W(j) = r[i];
        }
        offset >>= 3;
    }
}

/* SSE4.2 op helpers */
#if SHIFT == 1
/*
 * Explicit-length variant (PCMPESTR*): length from RAX/RDX, clamped to
 * the vector element count (8 words or 16 bytes per ctrl bit 0).
 */
static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl)
{
    target_long val, limit;

    /* Presence of REX.W is indicated by a bit higher than 7 set */
    if (ctrl >> 8) {
        val = (target_long)env->regs[reg];
    } else {
        val = (int32_t)env->regs[reg];
    }
    if (ctrl & 1) {
        limit = 8;
    } else {
        limit = 16;
    }
    if ((val > limit) || (val < -limit)) {
        return limit;
    }
    return abs1(val);
}

/* Implicit-length variant (PCMPISTR*): scan for the NUL terminator. */
static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
{
    int val = 0;

    if (ctrl & 1) {
        while (val < 8 && r->W(val)) {
            val++;
        }
    } else {
        while (val < 16 && r->B(val)) {
            val++;
        }
    }

    return val;
}

/* Fetch element i of r in the format selected by ctrl bits 0-1. */
static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
{
    switch ((ctrl >> 0) & 3) {
    case 0:
        return r->B(i);
    case 1:
        return r->W(i);
    case 2:
        return (int8_t)r->B(i);
    case 3:
    default:
        return (int16_t)r->W(i);
    }
}

/*
 * Common core of the PCMPxSTRx instructions.  ctrl bits 2-3 select the
 * aggregation (equal-any, ranges, equal-each, equal-ordered) and bits
 * 4-5 the polarity applied to the intermediate result.  Also computes
 * the output flags (ZF/SF from the lengths, CF/OF from the result).
 */
static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s,
                                 uint8_t ctrl, int valids, int validd)
{
    unsigned int res = 0;
    int v;
    int j, i;
    int upper = (ctrl & 1) ? 7 : 15;

    valids--;
    validd--;

    CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0);
    CC_OP = CC_OP_EFLAGS;

    switch ((ctrl >> 2) & 3) {
    case 0:
        /* Equal any: bit j set if s[j] matches any valid d element. */
        for (j = valids; j >= 0; j--) {
            res <<= 1;
            v = pcmp_val(s, ctrl, j);
            for (i = validd; i >= 0; i--) {
                res |= (v == pcmp_val(d, ctrl, i));
            }
        }
        break;
    case 1:
        /* Ranges: d holds (lower, upper) pairs; test s[j] against each. */
        for (j = valids; j >= 0; j--) {
            res <<= 1;
            v = pcmp_val(s, ctrl, j);
            for (i = ((validd - 1) | 1); i >= 0; i -= 2) {
                res |= (pcmp_val(d, ctrl, i - 0) >= v &&
                        pcmp_val(d, ctrl, i - 1) <= v);
            }
        }
        break;
    case 2:
        /* Equal each: elementwise compare; past-the-end positions match. */
        res = (1 << (upper - MAX(valids, validd))) - 1;
        res <<= MAX(valids, validd) - MIN(valids, validd);
        for (i = MIN(valids, validd); i >= 0; i--) {
            res <<= 1;
            v = pcmp_val(s, ctrl, i);
            res |= (v == pcmp_val(d, ctrl, i));
        }
        break;
    case 3:
        /* Equal ordered: substring search of d within s. */
        if (validd == -1) {
            res = (2 << upper) - 1;
            break;
        }
        for (j = valids == upper ? valids : valids - validd; j >= 0; j--) {
            res <<= 1;
            v = 1;
            for (i = MIN(valids - j, validd); i >= 0; i--) {
                v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
            }
            res |= v;
        }
        break;
    }

    switch ((ctrl >> 4) & 3) {
    case 1:
        /* Negate the whole result. */
        res ^= (2 << upper) - 1;
        break;
    case 3:
        /* Negate only the bits covering valid s elements. */
        res ^= (1 << (valids + 1)) - 1;
        break;
    }

    if (res) {
        CC_SRC |= CC_C;
    }
    if (res & 1) {
        CC_SRC |= CC_O;
    }

    return res;
}

/* PCMPESTRI: explicit lengths, index result in ECX. */
void glue(helper_pcmpestri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                    uint32_t ctrl)
{
    unsigned int res = pcmpxstrx(env, d, s, ctrl,
                                 pcmp_elen(env, R_EDX, ctrl),
                                 pcmp_elen(env, R_EAX, ctrl));

    /* ctrl bit 6: most- vs least-significant set bit. */
    if (res) {
        env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res);
    } else {
        env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
    }
}

/* PCMPESTRM: explicit lengths, mask result in XMM0 (bit or expanded). */
void glue(helper_pcmpestrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                    uint32_t ctrl)
{
    int i;
    unsigned int res = pcmpxstrx(env, d, s, ctrl,
                                 pcmp_elen(env, R_EDX, ctrl),
                                 pcmp_elen(env, R_EAX, ctrl));

    if ((ctrl >> 6) & 1) {
        /* Expanded mask: each result bit becomes an all-ones element. */
        if (ctrl & 1) {
            for (i = 0; i < 8; i++, res >>= 1) {
                env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0;
            }
        } else {
            for (i = 0; i < 16; i++, res >>= 1) {
                env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0;
            }
        }
    } else {
        env->xmm_regs[0].Q(1) = 0;
        env->xmm_regs[0].Q(0) = res;
    }
}

/* PCMPISTRI: implicit (NUL-terminated) lengths, index result in ECX. */
void glue(helper_pcmpistri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
                                    uint32_t ctrl)
{
    unsigned int res = pcmpxstrx(env, d, s, ctrl,
                                 pcmp_ilen(s, ctrl),
                                 pcmp_ilen(d, ctrl));

    if (res) {
        env->regs[R_ECX] = (ctrl & (1 << 6)) ?
CRCPOLY_BITREV : 0); 2125 } 2126 2127 return crc; 2128 } 2129 2130 #endif 2131 2132 void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, 2133 uint32_t ctrl) 2134 { 2135 int a_idx = (ctrl & 1) != 0; 2136 int b_idx = (ctrl & 16) != 0; 2137 2138 for (int i = 0; i < SHIFT; i++) { 2139 uint64_t a = v->Q(2 * i + a_idx); 2140 uint64_t b = s->Q(2 * i + b_idx); 2141 Int128 *r = (Int128 *)&d->ZMM_X(i); 2142 2143 *r = clmul_64(a, b); 2144 } 2145 } 2146 2147 void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2148 { 2149 for (int i = 0; i < SHIFT; i++) { 2150 AESState *ad = (AESState *)&d->ZMM_X(i); 2151 AESState *st = (AESState *)&v->ZMM_X(i); 2152 AESState *rk = (AESState *)&s->ZMM_X(i); 2153 2154 aesdec_ISB_ISR_IMC_AK(ad, st, rk, false); 2155 } 2156 } 2157 2158 void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2159 { 2160 for (int i = 0; i < SHIFT; i++) { 2161 AESState *ad = (AESState *)&d->ZMM_X(i); 2162 AESState *st = (AESState *)&v->ZMM_X(i); 2163 AESState *rk = (AESState *)&s->ZMM_X(i); 2164 2165 aesdec_ISB_ISR_AK(ad, st, rk, false); 2166 } 2167 } 2168 2169 void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2170 { 2171 for (int i = 0; i < SHIFT; i++) { 2172 AESState *ad = (AESState *)&d->ZMM_X(i); 2173 AESState *st = (AESState *)&v->ZMM_X(i); 2174 AESState *rk = (AESState *)&s->ZMM_X(i); 2175 2176 aesenc_SB_SR_MC_AK(ad, st, rk, false); 2177 } 2178 } 2179 2180 void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2181 { 2182 for (int i = 0; i < SHIFT; i++) { 2183 AESState *ad = (AESState *)&d->ZMM_X(i); 2184 AESState *st = (AESState *)&v->ZMM_X(i); 2185 AESState *rk = (AESState *)&s->ZMM_X(i); 2186 2187 aesenc_SB_SR_AK(ad, st, rk, false); 2188 } 2189 } 2190 2191 #if SHIFT == 1 2192 void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) 2193 { 2194 AESState *ad = (AESState *)&d->ZMM_X(0); 2195 AESState *st = (AESState *)&s->ZMM_X(0); 
2196 2197 aesdec_IMC(ad, st, false); 2198 } 2199 2200 void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, 2201 uint32_t ctrl) 2202 { 2203 int i; 2204 Reg tmp = *s; 2205 2206 for (i = 0 ; i < 4 ; i++) { 2207 d->B(i) = AES_sbox[tmp.B(i + 4)]; 2208 d->B(i + 8) = AES_sbox[tmp.B(i + 12)]; 2209 } 2210 d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl; 2211 d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl; 2212 } 2213 #endif 2214 #endif 2215 2216 #if SHIFT >= 1 2217 void glue(helper_vpermilpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2218 { 2219 uint64_t r0, r1; 2220 int i; 2221 2222 for (i = 0; i < 1 << SHIFT; i += 2) { 2223 r0 = v->Q(i + ((s->Q(i) >> 1) & 1)); 2224 r1 = v->Q(i + ((s->Q(i+1) >> 1) & 1)); 2225 d->Q(i) = r0; 2226 d->Q(i+1) = r1; 2227 } 2228 } 2229 2230 void glue(helper_vpermilps, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) 2231 { 2232 uint32_t r0, r1, r2, r3; 2233 int i; 2234 2235 for (i = 0; i < 2 << SHIFT; i += 4) { 2236 r0 = v->L(i + (s->L(i) & 3)); 2237 r1 = v->L(i + (s->L(i+1) & 3)); 2238 r2 = v->L(i + (s->L(i+2) & 3)); 2239 r3 = v->L(i + (s->L(i+3) & 3)); 2240 d->L(i) = r0; 2241 d->L(i+1) = r1; 2242 d->L(i+2) = r2; 2243 d->L(i+3) = r3; 2244 } 2245 } 2246 2247 void glue(helper_vpermilpd_imm, SUFFIX)(Reg *d, Reg *s, uint32_t order) 2248 { 2249 uint64_t r0, r1; 2250 int i; 2251 2252 for (i = 0; i < 1 << SHIFT; i += 2) { 2253 r0 = s->Q(i + ((order >> 0) & 1)); 2254 r1 = s->Q(i + ((order >> 1) & 1)); 2255 d->Q(i) = r0; 2256 d->Q(i+1) = r1; 2257 2258 order >>= 2; 2259 } 2260 } 2261 2262 void glue(helper_vpermilps_imm, SUFFIX)(Reg *d, Reg *s, uint32_t order) 2263 { 2264 uint32_t r0, r1, r2, r3; 2265 int i; 2266 2267 for (i = 0; i < 2 << SHIFT; i += 4) { 2268 r0 = s->L(i + ((order >> 0) & 3)); 2269 r1 = s->L(i + ((order >> 2) & 3)); 2270 r2 = s->L(i + ((order >> 4) & 3)); 2271 r3 = s->L(i + ((order >> 6) & 3)); 2272 d->L(i) = r0; 2273 d->L(i+1) = r1; 2274 d->L(i+2) = r2; 2275 d->L(i+3) = r3; 2276 } 2277 } 2278 2279 #if 
SHIFT == 1
/*
 * Per-element variable shifts (VPSRLV/VPSRAV/VPSLLV).  Logical shifts
 * yield 0 for out-of-range counts; arithmetic shifts saturate the count
 * so the sign fills the element.
 */
#define FPSRLVD(x, c) (c < 32 ? ((x) >> c) : 0)
#define FPSRLVQ(x, c) (c < 64 ? ((x) >> c) : 0)
#define FPSRAVD(x, c) ((int32_t)(x) >> (c < 32 ? c : 31))
#define FPSRAVQ(x, c) ((int64_t)(x) >> (c < 64 ? c : 63))
#define FPSLLVD(x, c) (c < 32 ? ((x) << c) : 0)
#define FPSLLVQ(x, c) (c < 64 ? ((x) << c) : 0)
#endif

SSE_HELPER_L(helper_vpsrlvd, FPSRLVD)
SSE_HELPER_L(helper_vpsravd, FPSRAVD)
SSE_HELPER_L(helper_vpsllvd, FPSLLVD)

SSE_HELPER_Q(helper_vpsrlvq, FPSRLVQ)
SSE_HELPER_Q(helper_vpsravq, FPSRAVQ)
SSE_HELPER_Q(helper_vpsllvq, FPSLLVQ)

/* VTESTPS: like PTEST but only the float sign bits participate. */
void glue(helper_vtestps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    uint32_t zf = 0, cf = 0;
    int i;

    for (i = 0; i < 2 << SHIFT; i++) {
        zf |= (s->L(i) & d->L(i));
        cf |= (s->L(i) & ~d->L(i));
    }
    CC_SRC = ((zf >> 31) ? 0 : CC_Z) | ((cf >> 31) ? 0 : CC_C);
    CC_OP = CC_OP_EFLAGS;
}

/* VTESTPD: as vtestps for double sign bits. */
void glue(helper_vtestpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
{
    uint64_t zf = 0, cf = 0;
    int i;

    for (i = 0; i < 1 << SHIFT; i++) {
        zf |= (s->Q(i) & d->Q(i));
        cf |= (s->Q(i) & ~d->Q(i));
    }
    CC_SRC = ((zf >> 63) ? 0 : CC_Z) | ((cf >> 63) ? 0 : CC_C);
    CC_OP = CC_OP_EFLAGS;
}

/* VPMASKMOVD store: write only the dwords whose mask sign bit is set. */
void glue(helper_vpmaskmovd_st, SUFFIX)(CPUX86State *env,
                                        Reg *v, Reg *s, target_ulong a0)
{
    int i;

    for (i = 0; i < (2 << SHIFT); i++) {
        if (v->L(i) >> 31) {
            cpu_stl_data_ra(env, a0 + i * 4, s->L(i), GETPC());
        }
    }
}

/* VPMASKMOVQ store: write only the qwords whose mask sign bit is set. */
void glue(helper_vpmaskmovq_st, SUFFIX)(CPUX86State *env,
                                        Reg *v, Reg *s, target_ulong a0)
{
    int i;

    for (i = 0; i < (1 << SHIFT); i++) {
        if (v->Q(i) >> 63) {
            cpu_stq_data_ra(env, a0 + i * 8, s->Q(i), GETPC());
        }
    }
}

/* VPMASKMOVD load (register path): masked-off elements become zero. */
void glue(helper_vpmaskmovd, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;

    for (i = 0; i < (2 << SHIFT); i++) {
        d->L(i) = (v->L(i) >> 31) ? s->L(i) : 0;
    }
}

/* VPMASKMOVQ load (register path). */
void glue(helper_vpmaskmovq, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s)
{
    int i;

    for (i = 0; i < (1 << SHIFT); i++) {
        d->Q(i) = (v->Q(i) >> 63) ? s->Q(i) : 0;
    }
}

/*
 * VPGATHERDD: gather dwords at a0 + sign-extended dword index << scale,
 * one per mask element whose sign bit is set.  The mask register is
 * cleared as the architecture requires.
 */
void glue(helper_vpgatherdd, SUFFIX)(CPUX86State *env,
        Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale)
{
    int i;
    for (i = 0; i < (2 << SHIFT); i++) {
        if (v->L(i) >> 31) {
            target_ulong addr = a0
                + ((target_ulong)(int32_t)s->L(i) << scale);
            d->L(i) = cpu_ldl_data_ra(env, addr, GETPC());
        }
        v->L(i) = 0;
    }
}

/* VPGATHERDQ: gather qwords using dword indices. */
void glue(helper_vpgatherdq, SUFFIX)(CPUX86State *env,
        Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale)
{
    int i;
    for (i = 0; i < (1 << SHIFT); i++) {
        if (v->Q(i) >> 63) {
            target_ulong addr = a0
                + ((target_ulong)(int32_t)s->L(i) << scale);
            d->Q(i) = cpu_ldq_data_ra(env, addr, GETPC());
        }
        v->Q(i) = 0;
    }
}

/* VPGATHERQD: gather dwords using qword indices; upper half zeroed. */
void glue(helper_vpgatherqd, SUFFIX)(CPUX86State *env,
        Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale)
{
    int i;
    for (i = 0; i < (1 << SHIFT); i++) {
        if (v->L(i) >> 31) {
            target_ulong addr = a0
                + ((target_ulong)(int64_t)s->Q(i) << scale);
            d->L(i) = cpu_ldl_data_ra(env, addr, GETPC());
        }
        v->L(i) = 0;
    }
    /* Only half the dwords are produced; zero the rest of d and v. */
    for (i /= 2; i < 1 << SHIFT; i++) {
        d->Q(i) = 0;
        v->Q(i) = 0;
    }
}

/* VPGATHERQQ: gather qwords using qword indices. */
void glue(helper_vpgatherqq, SUFFIX)(CPUX86State *env,
        Reg *d, Reg *v, Reg *s, target_ulong a0, unsigned scale)
{
    int i;
    for (i = 0; i < (1 << SHIFT); i++) {
        if (v->Q(i) >> 63) {
            target_ulong addr = a0
                + ((target_ulong)(int64_t)s->Q(i) << scale);
            d->Q(i) = cpu_ldq_data_ra(env, addr, GETPC());
        }
        v->Q(i) = 0;
    }
}
#endif

#if SHIFT >= 2
/*
 * VPERM2F128/VPERM2I128: select each 128-bit half of the result from
 * one of the four source halves; bits 3 and 7 zero the halves instead.
 */
void helper_vpermdq_ymm(Reg *d, Reg *v, Reg *s, uint32_t order)
{
    uint64_t r0, r1, r2, r3;

    switch (order & 3) {
    case 0:
        r0 = v->Q(0);
        r1 = v->Q(1);
        break;
    case 1:
        r0 = v->Q(2);
        r1 = v->Q(3);
        break;
    case 2:
        r0 = s->Q(0);
        r1 = s->Q(1);
        break;
    case 3:
        r0 = s->Q(2);
        r1 = s->Q(3);
        break;
    default: /* default case added to help the compiler to avoid warnings */
        g_assert_not_reached();
    }
    switch ((order >> 4) & 3) {
    case 0:
        r2 = v->Q(0);
        r3 = v->Q(1);
        break;
    case 1:
        r2 = v->Q(2);
        r3 = v->Q(3);
        break;
    case 2:
        r2 = s->Q(0);
        r3 = s->Q(1);
        break;
    case 3:
        r2 = s->Q(2);
        r3 = s->Q(3);
        break;
    default: /* default case added to help the compiler to avoid warnings */
        g_assert_not_reached();
    }
    d->Q(0) = r0;
    d->Q(1) = r1;
    d->Q(2) = r2;
    d->Q(3) = r3;
    if (order & 0x8) {
        d->Q(0) = 0;
        d->Q(1) = 0;
    }
    if (order & 0x80) {
        d->Q(2) = 0;
        d->Q(3) = 0;
    }
}

/* VPERMQ: full-width qword permutation by immediate. */
void helper_vpermq_ymm(Reg *d, Reg *s, uint32_t order)
{
    uint64_t r0, r1, r2, r3;
    r0 = s->Q(order & 3);
    r1 = s->Q((order >> 2) & 3);
    r2 = s->Q((order >> 4) & 3);
    r3 = s->Q((order >> 6) & 3);
    d->Q(0) = r0;
    d->Q(1) = r1;
    d->Q(2) = r2;
    d->Q(3) = r3;
}

/* Permute the eight dwords of s into d, each destination lane selected
 * by the low three bits of the corresponding dword of v.  Staged
 * through r[] so that d may alias either source operand. */
void helper_vpermd_ymm(Reg *d, Reg *v, Reg *s)
{
    uint32_t r[8];
    int i;

    for (i = 0; i < 8; i++) {
        r[i] = s->L(v->L(i) & 7);
    }
    for (i = 0; i < 8; i++) {
        d->L(i) = r[i];
    }
}
#endif

/* FMA3 op helpers */
#if SHIFT == 1
/* Scalar fused multiply-add: only element 0 is computed.  `flags` is
 * passed straight to the softfloat muladd (product/result negation
 * variants are encoded there). */
#define SSE_HELPER_FMAS(name, elem, F) \
    void name(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, int flags) \
    { \
        d->elem(0) = F(a->elem(0), b->elem(0), c->elem(0), flags, &env->sse_status); \
    }
/* Packed fused multiply-add over `num` elements.  `flip` is XORed into
 * `flags` after every lane, alternating the muladd flags between even
 * and odd lanes (used for the FMADDSUB/FMSUBADD forms). */
#define SSE_HELPER_FMAP(name, elem, num, F) \
    void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, \
                            int flags, int flip) \
    { \
        int i; \
        for (i = 0; i < num; i++) { \
            d->elem(i) = F(a->elem(i), b->elem(i), c->elem(i), flags, &env->sse_status); \
            flags ^= flip; \
        } \
    }

SSE_HELPER_FMAS(helper_fma4ss, ZMM_S, float32_muladd)
SSE_HELPER_FMAS(helper_fma4sd, ZMM_D, float64_muladd)
#endif

#if SHIFT >= 1
SSE_HELPER_FMAP(helper_fma4ps, ZMM_S, 2 << SHIFT, float32_muladd)
SSE_HELPER_FMAP(helper_fma4pd, ZMM_D, 1 << SHIFT, float64_muladd)
#endif

#if SHIFT == 1
/*
 * Four SHA-1 rounds: working state (A,B,C,D) is taken from a, the
 * pre-added W+E round inputs from b (highest dword first).  F selects
 * the round boolean function, K the round constant.  E starts at 0
 * because b's top dword already has E folded in (see sha1nexte).
 */
#define SSE_HELPER_SHA1RNDS4(name, F, K) \
    void name(Reg *d, Reg *a, Reg *b) \
    { \
        uint32_t A, B, C, D, E, t, i; \
        \
        A = a->L(3); \
        B = a->L(2); \
        C = a->L(1); \
        D = a->L(0); \
        E = 0; \
        \
        for (i = 0; i <= 3; i++) { \
            t = F(B, C, D) + rol32(A, 5) + b->L(3 - i) + E + K; \
            E = D; \
            D = C; \
            C = rol32(B, 30); \
            B = A; \
            A = t; \
        } \
        \
        d->L(3) = A; \
        d->L(2) = B; \
        d->L(1) = C; \
        d->L(0) = D; \
    }

/* SHA-1 round functions: Ch (rounds 0-19), parity (20-39 and 60-79),
 * and Maj (40-59). */
#define SHA1_F0(b, c, d) (((b) & (c)) ^ (~(b) & (d)))
#define SHA1_F1(b, c, d) ((b) ^ (c) ^ (d))
#define SHA1_F2(b, c, d) (((b) & (c)) ^ \
    ((b) & (d)) ^ ((c) & (d)))

SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f0, SHA1_F0, 0x5A827999)
SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f1, SHA1_F1, 0x6ED9EBA1)
SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f2, SHA1_F2, 0x8F1BBCDC)
/* Rounds 60-79 reuse the parity function F1 with a new constant. */
SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f3, SHA1_F1, 0xCA62C1D6)

/* Fold the rotated E value (from a) into b's top dword; the other
 * three lanes pass through unchanged. */
void helper_sha1nexte(Reg *d, Reg *a, Reg *b)
{
    d->L(3) = b->L(3) + rol32(a->L(3), 30);
    d->L(2) = b->L(2);
    d->L(1) = b->L(1);
    d->L(0) = b->L(0);
}

/* SHA-1 message schedule, first half: XOR of message words 16 and 14
 * positions back, computed for four consecutive schedule words. */
void helper_sha1msg1(Reg *d, Reg *a, Reg *b)
{
    /* These could be overwritten by the first two assignments, save them. */
    uint32_t b3 = b->L(3);
    uint32_t b2 = b->L(2);

    d->L(3) = a->L(3) ^ a->L(1);
    d->L(2) = a->L(2) ^ a->L(0);
    d->L(1) = a->L(1) ^ b3;
    d->L(0) = a->L(0) ^ b2;
}

/* SHA-1 message schedule, second half: XOR in the 3-back words and
 * rotate left by one.  Lane 0 deliberately consumes the lane-3 result
 * just computed (the newest schedule word feeds the next one). */
void helper_sha1msg2(Reg *d, Reg *a, Reg *b)
{
    d->L(3) = rol32(a->L(3) ^ b->L(2), 1);
    d->L(2) = rol32(a->L(2) ^ b->L(1), 1);
    d->L(1) = rol32(a->L(1) ^ b->L(0), 1);
    d->L(0) = rol32(a->L(0) ^ d->L(3), 1);
}

/* SHA-256 round primitives: Ch, Maj, the two big-sigma functions used
 * in the rounds, and the two small-sigma functions used in the message
 * schedule. */
#define SHA256_CH(e, f, g) (((e) & (f)) ^ (~(e) & (g)))
#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))

#define SHA256_RNDS0(w) (ror32((w), 2) ^ ror32((w), 13) ^ ror32((w), 22))
#define SHA256_RNDS1(w) (ror32((w), 6) ^ ror32((w), 11) ^ ror32((w), 25))
#define SHA256_MSGS0(w) (ror32((w), 7) ^ ror32((w), 18) ^ ((w) >> 3))
#define SHA256_MSGS1(w) (ror32((w), 17) ^ ror32((w), 19) ^ ((w) >> 10))

/*
 * Two SHA-256 rounds: the eight-word working state is split across the
 * two operands (A,B,E,F in b; C,D,G,H in a), and wk0/wk1 carry the
 * pre-added W+K inputs for the even and odd round respectively.
 */
void helper_sha256rnds2(Reg *d, Reg *a, Reg *b, uint32_t wk0, uint32_t wk1)
{
    uint32_t t, AA, EE;

    uint32_t A = b->L(3);
    uint32_t B = b->L(2);
    uint32_t C = a->L(3);
    uint32_t D = a->L(2);
    uint32_t E = b->L(1);
    uint32_t F = b->L(0);
    uint32_t G = a->L(1);
    uint32_t H = a->L(0);

    /* Even round */
    t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk0 + H;
    AA = t + SHA256_MAJ(A, B, C) +
        SHA256_RNDS0(A);
    EE = t + D;

    /* These will be B and F at the end of the odd round */
    d->L(2) = AA;
    d->L(0) = EE;

    /* Rotate the working state for the odd round. */
    D = C, C = B, B = A, A = AA;
    H = G, G = F, F = E, E = EE;

    /* Odd round */
    t = SHA256_CH(E, F, G) + SHA256_RNDS1(E) + wk1 + H;
    AA = t + SHA256_MAJ(A, B, C) + SHA256_RNDS0(A);
    EE = t + D;

    d->L(3) = AA;
    d->L(1) = EE;
}

/* SHA-256 message schedule, first half: add sigma0 of the following
 * word to each of four consecutive schedule words. */
void helper_sha256msg1(Reg *d, Reg *a, Reg *b)
{
    /* b->L(0) could be overwritten by the first assignment, save it. */
    uint32_t b0 = b->L(0);

    d->L(0) = a->L(0) + SHA256_MSGS0(a->L(1));
    d->L(1) = a->L(1) + SHA256_MSGS0(a->L(2));
    d->L(2) = a->L(2) + SHA256_MSGS0(a->L(3));
    d->L(3) = a->L(3) + SHA256_MSGS0(b0);
}

/* SHA-256 message schedule, second half: add sigma1 of the word two
 * positions back; the last two lanes feed on the first two results
 * just written (serial dependency inherent in the schedule). */
void helper_sha256msg2(Reg *d, Reg *a, Reg *b)
{
    /* Earlier assignments cannot overwrite any of the two operands. */
    d->L(0) = a->L(0) + SHA256_MSGS1(b->L(2));
    d->L(1) = a->L(1) + SHA256_MSGS1(b->L(3));
    /* Yes, this reuses the previously computed values. */
    d->L(2) = a->L(2) + SHA256_MSGS1(d->L(0));
    d->L(3) = a->L(3) + SHA256_MSGS1(d->L(1));
}
#endif

#undef SSE_HELPER_S

/* This header is a template instantiated once per vector width (see
 * the SHIFT dispatch at the top); drop the per-instantiation macros so
 * it can be included again with a different SHIFT. */
#undef LANE_WIDTH
#undef SHIFT
#undef XMM_ONLY
#undef Reg
#undef B
#undef W
#undef L
#undef Q
#undef SUFFIX