/*
 * PowerPC floating point and SPE emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "internal.h"

static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)

/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}

static inline int ppc_float32_get_unbiased_exp(float32 f)
{
    return ((f >> 23) & 0xFF) - 127;
}

static inline int ppc_float64_get_unbiased_exp(float64 f)
{
    return ((f >> 52) & 0x7FF) - 1023;
}

#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
{                                                              \
    int isneg;                                                 \
    int fprf;                                                  \
                                                               \
    isneg = tp##_is_neg(arg);                                  \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
            /* Signaling NaN: flags are undefined */           \
            fprf = 0x00;                                       \
        } else {                                               \
            /* Quiet NaN */                                    \
            fprf = 0x11;                                       \
        }                                                      \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        /* +/- infinity */                                     \
        if (isneg) {                                           \
            fprf = 0x09;                                       \
        } else {                                               \
            fprf = 0x05;                                       \
        }                                                      \
    } else {                                                   \
        if (tp##_is_zero(arg)) {                               \
            /* +/- zero */                                     \
            if (isneg) {                                       \
                fprf = 0x12;                                   \
            } else {                                           \
                fprf = 0x02;                                   \
            }                                                  \
        } else {                                               \
            if (tp##_is_zero_or_denormal(arg)) {               \
                /* Denormalized numbers */                     \
                fprf = 0x10;                                   \
            } else {                                           \
                /* Normalized numbers */                       \
                fprf = 0x00;                                   \
            }                                                  \
            if (isneg) {                                       \
                fprf |= 0x08;                                  \
            } else {                                           \
                fprf |= 0x04;                                  \
            }                                                  \
        }                                                      \
    }                                                          \
    /* We update FPSCR_FPRF */                                 \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
    env->fpscr |= fprf << FPSCR_FPRF;                          \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
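
/*
 * For reference, the 5-bit FPRF patterns (C || FPCC) that the macro
 * above can produce, read straight off the code paths; they match the
 * Power ISA result-class encoding:
 *
 *   0x11  quiet NaN        0x12  -zero       0x02  +zero
 *   0x09  -infinity        0x18  -denormal   0x14  +denormal
 *   0x05  +infinity        0x08  -normal     0x04  +normal
 *
 * A signaling NaN stores 0x00: the architecture leaves the flags
 * undefined in that case.
 */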

/* Floating-point invalid operations exception */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}

static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}

static inline void float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));

    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}

static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}

void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        }
    }
}

void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
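
/*
 * helper_store_fpscr() below updates FPSCR under a nibble-granular
 * mask: bit i of 'mask' enables the update of FPSCR bits 4*i+3..4*i
 * (counting from the least-significant bit).  As a worked example,
 * assuming the usual QEMU FPSCR_* bit numbering used throughout this
 * file (ZE=4, UE=5, OE=6, VE=7):
 *
 *     mask = 0x0002  ->  only nibble 1 is written, i.e. the
 *                        ZE/UE/OE/VE exception-enable bits.
 */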

void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}

void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}

static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_divbyzero) {
        float_zero_divide_excp(env, raddr);
    } else if (status & float_flag_overflow) {
        float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    } else if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

static inline __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}

void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}

/* fadd - fadd. */
uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN addition */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
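
/*
 * Worked example for the VXISI test above: (+inf) + (-inf) is a
 * "magnitude subtraction of infinities" and produces the default quiet
 * NaN with VXISI set, whereas (+inf) + (+inf) is valid and simply
 * returns +inf.  helper_fsub below uses the same test with the sign
 * condition inverted, since a - b behaves like a + (-b).
 */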

/* fsub - fsub. */
uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN subtraction */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN multiplication */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status))) {
            /* sNaN division */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}


#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
{                                                                      \
    CPU_DoubleU farg;                                                  \
                                                                       \
    farg.ll = arg;                                                     \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
                                                                       \
    if (unlikely(env->fp_status.float_exception_flags)) {              \
        if (float64_is_any_nan(arg)) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
            }                                                          \
            farg.ll = nanval;                                          \
        } else if (env->fp_status.float_exception_flags &              \
                   float_flag_invalid) {                               \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
        }                                                              \
        float_check_status(env);                                       \
    }                                                                  \
    return farg.ll;                                                    \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
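
/*
 * The 'nanval' argument of FPU_FCTI is the saturated result stored
 * when the source is a NaN: the most negative integer for the signed
 * variants (e.g. 0x80000000 for fctiw/fctiwz) and zero for the
 * unsigned ones.  Out-of-range non-NaN inputs keep the softfloat
 * conversion result and only set VXCVI.
 */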

#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    float_check_status(env);                               \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)

static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;
    } else {
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}

uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}

/* fmadd - fmadd. */
uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
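
/*
 * Why helper_fmadd and the variants below go through float128: a
 * float64 significand has 53 bits, so the exact product of two of them
 * needs up to 106 bits, which fits in float128's 113-bit significand.
 * The multiply therefore never rounds, and the only rounding happens
 * in the final float128_to_float64() conversion - exactly the single
 * rounding that fused multiply-add semantics require.
 */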

/* fmsub - fmsub. */
uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                      uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}

/* fnmadd - fnmadd. */
uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* fnmsub - fnmsub. */
uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                       uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
            /* sNaN operation */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}

/* frsp - frsp. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN conversion to single precision */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fre - fre. */
uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN reciprocal */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}

/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }

    return farg.ll;
}

/* fsel - fsel. */
uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                     uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
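
/*
 * fsel semantics as implemented above: arg2 is selected when arg1 is
 * greater than or equal to zero and not a NaN, otherwise arg3.  Note
 * that both +0.0 and -0.0 select arg2 (the is_zero test deliberately
 * overrides the sign bit), while any NaN in arg1 selects arg3.
 */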

uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

uint32_t helper_ftsqrt(uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_zero(frb))) {
            fe_flag = 1;
        } else if (unlikely(float64_is_neg(frb))) {
            fe_flag = 1;
        } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* therefore must be denormalized. */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}

void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
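
/*
 * The 4-bit comparison result written to CR[crfD] and FPCC above uses
 * the standard PowerPC encoding:
 *
 *   0x08  FL (less than)      0x02  FE (equal)
 *   0x04  FG (greater than)   0x01  FU (unordered, NaN operand)
 *
 * fcmpu only raises VXSNAN, and only for signaling NaNs; fcmpo (the
 * ordered compare) additionally raises VXVC for any NaN operand.
 */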

/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}

#define HELPER_SPE_SINGLE_CONV(name)                    \
uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
{                                                       \
    return e##name(env, val);                           \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                       \
uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)   \
{                                                          \
    return ((uint64_t)e##name(env, val >> 32) << 32) |     \
           (uint64_t)e##name(env, val);                    \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
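
/*
 * The *sf/*uf conversions above treat the 32-bit operand as a signed
 * or unsigned fixed-point value with 32 fraction bits, so the helpers
 * simply scale by 2^32: dividing by 2^32 turns an integer into the
 * corresponding fraction (efscfsf/efscfuf), and multiplying by 2^32
 * before the float-to-integer conversion reverses it
 * (efsctsf/efsctuf).
 */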

/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

#define HELPER_SPE_SINGLE_ARITH(name)                                 \
uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
{                                                                     \
    return e##name(env, op1, op2);                                    \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                  \
uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
{                                                                      \
    return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |      \
           (uint64_t)e##name(env, op1, op2);                           \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);

/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                                   \
uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
{                                                                     \
    return e##name(env, op1, op2);                                    \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

#define HELPER_VECTOR_SPE_CMP(name)                                    \
uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
{                                                                      \
    return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),             \
                       e##name(env, op1, op2));                        \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
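
/*
 * evcmp_merge() packs the two per-element comparison results into one
 * CR field: bit 3 is the high element's result, bit 2 the low
 * element's, bit 1 is set if either compared true ("any") and bit 0
 * if both did ("all").
 */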

/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}

uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaNs are not treated the way IEEE 754 specifies */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}

uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}

/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* Double-precision floating-point helpers */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}

#define float64_to_float64(x, env) x


/* VSX_ADD_SUB - VSX floating point add/subtract
 * name - instruction mnemonic
 * op - operation (add or sub)
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * sfprf - set FPRF
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
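
/*
 * Note the pattern shared by the VSX helpers above and below: each
 * element operation runs on 'tstat', a scratch copy of env->fp_status
 * with its exception flags cleared, so the helper can test exactly
 * which exceptions this one operation raised; the flags are then OR-ed
 * back into the accumulated env->fp_status.
 */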

void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    if (unlikely(Rc(opcode) != 0)) {
        /* TODO: Support xsaddqpo after round-to-odd is implemented */
        abort();
    }

    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

/* VSX_MUL - VSX floating point multiply
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * sfprf - set FPRF
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)

void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    if (unlikely(Rc(opcode) != 0)) {
        /* TODO: Support xsmulqpo after round-to-odd is implemented */
        abort();
    }

    helper_reset_fpstatus(env);

    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
            (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }
    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

/* VSX_DIV - VSX floating point divide
 * op - instruction mnemonic
 * nels - number of elements (1, 2 or 4)
 * tp - type (float32 or float64)
 * fld - vsr_t field (VsrD(*) or VsrW(*))
 * sfprf - set FPRF
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);    \
            } else if (tp##_is_zero(xa.fld) &&                               \
                       tp##_is_zero(xb.fld)) {                               \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)

void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);

    if (unlikely(Rc(opcode) != 0)) {
        /* TODO: Support xsdivqpo after round-to-odd is implemented */
        abort();
    }

    helper_reset_fpstatus(env);
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
        } else if (float128_is_zero(xa.f128) &&
                   float128_is_zero(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
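
/*
 * The "estimate" helpers below (VSX_RE, and VSX_RSQRTE further down)
 * compute a full-precision 1/x or 1/sqrt(x) instead of a low-precision
 * hardware estimate, so the emulated results are at least as accurate
 * as the architectural minimum requires.
 */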
/* VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                          \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
    helper_reset_fpstatus(env);                                         \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);  \
        }                                                               \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);           \
                                                                        \
        if (r2sp) {                                                     \
            xt.fld = helper_frsp(env, xt.fld);                          \
        }                                                               \
                                                                        \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.fld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)

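/* Note: the reciprocal "estimate" instructions are emulated here with a
 * full-precision 1/x divide.  The architecture only requires the
 * estimate to lie within a bounded relative error of the true
 * reciprocal, so returning the correctly rounded quotient is a valid
 * (just unusually accurate) implementation.
 */
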
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)

/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)

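/* In VSX_RSQRTE the sqrt and the divide share one tstat, so a negative
 * non-zero input is classified as VXSQRT even though the invalid flag
 * was actually raised by the sqrt step; as with VSX_RE, the "estimate"
 * is computed at full precision.
 */
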
/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xa, xb;                                                 \
    int i;                                                            \
    int fe_flag = 0;                                                  \
    int fg_flag = 0;                                                  \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xB(opcode), &xb, env);                                     \
                                                                      \
    for (i = 0; i < nels; i++) {                                      \
        if (unlikely(tp##_is_infinity(xa.fld) ||                      \
                     tp##_is_infinity(xb.fld) ||                      \
                     tp##_is_zero(xb.fld))) {                         \
            fe_flag = 1;                                              \
            fg_flag = 1;                                              \
        } else {                                                      \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);            \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);            \
                                                                      \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                   \
                         tp##_is_any_nan(xb.fld))) {                  \
                fe_flag = 1;                                          \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {        \
                fe_flag = 1;                                          \
            } else if (!tp##_is_zero(xa.fld) &&                       \
                       (((e_a - e_b) >= emax) ||                      \
                        ((e_a - e_b) <= (emin + 1)) ||                \
                        (e_a <= (emin + nbits)))) {                   \
                fe_flag = 1;                                          \
            }                                                         \
                                                                      \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {         \
                /* XB is not zero because of the above check and */   \
                /* so must be denormalized. */                        \
                fg_flag = 1;                                          \
            }                                                         \
        }                                                             \
    }                                                                 \
                                                                      \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)

/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xa, xb;                                                 \
    int i;                                                            \
    int fe_flag = 0;                                                  \
    int fg_flag = 0;                                                  \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xB(opcode), &xb, env);                                     \
                                                                      \
    for (i = 0; i < nels; i++) {                                      \
        if (unlikely(tp##_is_infinity(xb.fld) ||                      \
                     tp##_is_zero(xb.fld))) {                         \
            fe_flag = 1;                                              \
            fg_flag = 1;                                              \
        } else {                                                      \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);            \
                                                                      \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                  \
                fe_flag = 1;                                          \
            } else if (unlikely(tp##_is_zero(xb.fld))) {              \
                fe_flag = 1;                                          \
            } else if (unlikely(tp##_is_neg(xb.fld))) {               \
                fe_flag = 1;                                          \
            } else if (!tp##_is_zero(xb.fld) &&                       \
                       (e_b <= (emin + nbits))) {                     \
                fe_flag = 1;                                          \
            }                                                         \
                                                                      \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {         \
                /* XB is not zero because of the above check and */   \
                /* therefore must be denormalized. */                 \
                fg_flag = 1;                                          \
            }                                                         \
        }                                                             \
    }                                                                 \
                                                                      \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)

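/* The test helpers above produce only a CR field: 0x8 (the LT bit) is
 * always set, fg_flag is reported in the GT bit (0x4) and fe_flag in
 * the EQ bit (0x2).  For example, xstdivdp with XB = +0 sets both
 * flags, giving crf = 0xE.
 */
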
/* VSX_MADD - VSX floating point multiply/add variations
 *   op       - instruction mnemonic
 *   nels     - number of elements (1, 2 or 4)
 *   tp       - type (float32 or float64)
 *   fld      - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *              various forms (madd, msub, nmadd, nmsub)
 *   afrm     - A form (1=A, 0=M)
 *   sfprf    - set FPRF
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt_in, xa, xb, xt_out;                                         \
    ppc_vsr_t *b, *c;                                                        \
    int i;                                                                   \
                                                                             \
    if (afrm) { /* AxB + T */                                                \
        b = &xb;                                                             \
        c = &xt_in;                                                          \
    } else { /* AxT + B */                                                   \
        b = &xt_in;                                                          \
        c = &xb;                                                             \
    }                                                                        \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt_in, env);                                         \
                                                                             \
    xt_out = xt_in;                                                          \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
            /* Avoid double rounding errors by rounding the intermediate */  \
            /* result to odd. */                                             \
            set_float_rounding_mode(float_round_to_zero, &tstat);            \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
            xt_out.fld |= (get_float_exception_flags(&tstat) &               \
                           float_flag_inexact) != 0;                         \
        } else {                                                             \
            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                 \
                                     maddflgs, &tstat);                      \
        }                                                                    \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_signaling_nan(xa.fld, &tstat) ||                     \
                tp##_is_signaling_nan(b->fld, &tstat) ||                     \
                tp##_is_signaling_nan(c->fld, &tstat)) {                     \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
                tstat.float_exception_flags &= ~float_flag_invalid;          \
            }                                                                \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) ||        \
                (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) {        \
                xt_out.fld = float64_to_##tp(float_invalid_op_excp(env,      \
                    POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);         \
                tstat.float_exception_flags &= ~float_flag_invalid;          \
            }                                                                \
            if ((tstat.float_exception_flags & float_flag_invalid) &&        \
                ((tp##_is_infinity(xa.fld) ||                                \
                  tp##_is_infinity(b->fld)) &&                               \
                 tp##_is_infinity(c->fld))) {                                \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt_out.fld = helper_frsp(env, xt_out.fld);                       \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt_out.fld);                    \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt_out, env);                                        \
    float_check_status(env);                                                 \
}

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
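/* About the round-to-odd trick in VSX_MADD: forcing round-to-zero and
 * then ORing the inexact flag into the least significant bit of the
 * double-precision intermediate implements round-to-odd.  That keeps
 * enough low-order information that the final helper_frsp() rounding
 * to single precision matches a single correctly rounded fused
 * multiply-add; rounding to nearest twice instead can be off by one
 * ulp.
 */
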
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)

/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - comparison operation
 *   exp   - expected result of comparison
 *   svxvc - set VXVC bit
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
            xt.VsrD(0) = -1;                                                  \
            xt.VsrD(1) = 0;                                                   \
        } else {                                                              \
            xt.VsrD(0) = 0;                                                   \
            xt.VsrD(1) = 0;                                                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)

void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(xA(opcode), &xa, env);
    getVSR(xB(opcode), &xb, env);

    exp_a = extract64(xa.VsrD(0), 52, 11);
    exp_b = extract64(xb.VsrD(0), 52, 11);

    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
                 float64_is_any_nan(xb.VsrD(0)))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}

void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xa, xb;
    int64_t exp_a, exp_b;
    uint32_t cc;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);

    exp_a = extract64(xa.VsrD(0), 48, 15);
    exp_b = extract64(xb.VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa.f128) ||
                 float128_is_any_nan(xb.f128))) {
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;

    helper_float_check_status(env);
}

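/* xscmpexpdp and xscmpexpqp compare only the biased exponent fields;
 * sign and significand are ignored, so e.g. 1.0 and -1.75 compare as
 * equal.  Any NaN operand yields the unordered result (CRF_SO).
 */
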
#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(xA(opcode), &xa, env);                                        \
    getVSR(xB(opcode), &xb, env);                                        \
                                                                         \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
                                                                         \
    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
        cc |= CRF_LT;                                                    \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
                                                                         \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
                                                                         \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)

#define VSX_SCALAR_CMPQ(op, ordered)                                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(rA(opcode) + 32, &xa, env);                                   \
    getVSR(rB(opcode) + 32, &xb, env);                                   \
                                                                         \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||           \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {           \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||        \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {        \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
                                                                         \
    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {                \
        cc |= CRF_LT;                                                    \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {        \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
                                                                         \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
                                                                         \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)

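/* The ordered forms (xscmpodp/xscmpoqp) additionally raise VXVC for
 * quiet NaN operands (and for signaling NaNs when VE = 0), whereas the
 * unordered forms only flag signaling NaNs via VXSNAN.  Either way a
 * NaN operand produces the "unordered" condition code (CRF_SO).
 */
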
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name - instruction mnemonic
 *   op   - operation (max or min)
 *   nels - number of elements (1, 2 or 4)
 *   tp   - type (float32 or float64)
 *   fld  - vsr_t field (VsrD(*) or VsrW(*))
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                              \
void helper_##name(CPUPPCState *env, uint32_t opcode)                    \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);              \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||    \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);        \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    float_check_status(env);                                              \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))

/* VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
            }                                                             \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    if ((opcode >> (31 - 21)) & 1) {                                      \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    float_check_status(env);                                              \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)

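/* When the record bit (bit 21 of the opcode) is set, the vector
 * compares also summarize the element results in CR6: 0x8 if the
 * predicate held for every element, 0x2 if it held for none; mixed
 * results leave both bits clear.  A NaN element always counts as
 * "predicate false".
 */
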
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)

/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
                                                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                 \
    getVSR(rD(opcode) + 32, &xt, env);                                 \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);            \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                   \
                                            &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                     \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_##ttp(env, xt.tfld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    putVSR(rD(opcode) + 32, &xt, env);                                 \
    float_check_status(env);                                           \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)

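/* Note the field arguments in the vector forms above: xvcvdpsp writes
 * its two single-precision results to words 0 and 2 (VsrW(2*i)) and
 * xvcvspdp reads its sources from those same even-numbered words; the
 * odd words of the target are left unmodified.
 */
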
/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    memset(&xt, 0, sizeof(xt));                                    \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)

/*
 * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
 * added to this later.
 */
void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));

    if (unlikely(Rc(opcode) != 0)) {
        /* TODO: Support xscvqpdpo after round-to-odd is implemented */
        abort();
    }

    xt.VsrD(0) = float128_to_float64(xb.f128, &env->fp_status);
    if (unlikely(float128_is_signaling_nan(xb.f128,
                                           &env->fp_status))) {
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
        xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
    }
    helper_compute_fprf_float64(env, xt.VsrD(0));

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}

uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return (uint64_t)float64_to_float32(xb, &tstat) << 32;
}

uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
    float_status tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);

    return float32_to_float64(xb >> 32, &tstat);
}

/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op   - instruction mnemonic
 *   nels - number of elements (1, 2 or 4)
 *   stp  - source type (float32 or float64)
 *   ttp  - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                                                     &env->fp_status);       \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

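/* Float-to-integer conversions always truncate (round toward zero)
 * regardless of FPSCR[RN].  A NaN source stores the architected default
 * (rnan) and raises VXCVI (plus VXSNAN for a signaling NaN); values
 * outside the target range rely on softfloat's saturating conversion,
 * with the resulting invalid flag mapped onto VXCVI.
 */
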
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)

/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op   - instruction mnemonic
 *   stp  - source type (float32 or float64)
 *   ttp  - target type (int32, uint32, int64 or uint64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
        xt.tfld = rnan;                                                      \
    } else {                                                                 \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
                                                 &env->fp_status);           \
        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
                         0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
                         0xffffffff80000000ULL)

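/* The rnan arguments encode the architected result of an invalid
 * conversion: the most negative representable integer for signed
 * targets and zero for unsigned ones.  For xscvqpswz the 32-bit value
 * 0x80000000 is sign-extended into the 64-bit target field.
 */
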
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
                                                                       \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);            \
        if (r2sp) {                                                    \
            xt.tfld = helper_frsp(env, xt.tfld);                       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.tfld);                 \
        }                                                              \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    float_check_status(env);                                           \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)

/* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
 *   op   - instruction mnemonic
 *   stp  - source type (int32, uint32, int64 or uint64)
 *   ttp  - target type (float32 or float64)
 *   sfld - source vsr_t field
 *   tfld - target vsr_t field
 */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
                                                                       \
    getVSR(rB(opcode) + 32, &xb, env);                                 \
    getVSR(rD(opcode) + 32, &xt, env);                                 \
                                                                       \
    xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                \
    helper_compute_fprf_##ttp(env, xt.tfld);                           \
                                                                       \
    putVSR(rD(opcode) + 32, &xt, env);                                 \
    float_check_status(env);                                           \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)

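/* XXX: the single-precision forms with 64-bit sources (xscvsxdsp,
 * xscvuxdsp) convert to float64 first and then round again in
 * helper_frsp(); such double rounding may differ by one ulp from a
 * direct int64-to-float32 conversion for some large inputs.
 */
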
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding mode enums.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
                             float_round_up + float_round_to_zero)

/* VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.fld);                  \
        }                                                              \
    }                                                                  \
                                                                       \
    /* If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR */                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    float_check_status(env);                                           \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)

uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
    helper_reset_fpstatus(env);

    uint64_t xt = helper_frsp(env, xb);

    helper_compute_fprf_float64(env, xt);
    float_check_status(env);
    return xt;
}

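/* Suffix key for the VSX_ROUND instantiations above: "i" rounds to
 * nearest with ties away from zero, "ic" uses the current FPSCR
 * rounding mode, "im" rounds toward -infinity, "ip" toward +infinity
 * and "iz" toward zero.  The non-"ic" forms also suppress the XX
 * (inexact) status, as required for these instructions.
 */
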
#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xt, xa, pcv, xto;                                       \
    int i, idx;                                                       \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xT(opcode), &xt, env);                                     \
    getVSR(xB(opcode), &pcv, env);                                    \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv.VsrB(i) & 0x1F;                                     \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
    }                                                                 \
    putVSR(xT(opcode), &xto, env);                                    \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)

void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xb;
    uint32_t exp, i, fraction;

    getVSR(xB(opcode), &xb, env);
    memset(&xt, 0, sizeof(xt));

    for (i = 0; i < 4; i++) {
        exp = (xb.VsrW(i) >> 23) & 0xFF;
        fraction = xb.VsrW(i) & 0x7FFFFF;
        if (exp != 0 && exp != 255) {
            xt.VsrW(i) = fraction | 0x00800000;
        } else {
            xt.VsrW(i) = fraction;
        }
    }
    putVSR(xT(opcode), &xt, env);
}

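/* xvxsigsp extracts the significand of each word: for a normal number
 * the implicit integer bit is made explicit (e.g. 1.0f = 0x3F800000
 * yields 0x00800000), while zeros, denormals, infinities and NaNs
 * (exponent 0 or 255) return just the fraction bits.
 */
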
/* VSX_TEST_DC - VSX floating point test data class
 *   op      - instruction mnemonic
 *   nels    - number of elements (1, 2 or 4)
 *   xbn     - VSR register number
 *   tp      - type (float32 or float64)
 *   fld     - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld    - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf    - set result in CR and FPCC
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)               \
{                                                                 \
    ppc_vsr_t xt, xb;                                             \
    uint32_t i, sign, dcmx;                                       \
    uint32_t cc, match = 0;                                       \
                                                                  \
    getVSR(xbn, &xb, env);                                        \
    if (!scrf) {                                                  \
        memset(&xt, 0, sizeof(xt));                               \
        dcmx = DCMX_XV(opcode);                                   \
    } else {                                                      \
        dcmx = DCMX(opcode);                                      \
    }                                                             \
                                                                  \
    for (i = 0; i < nels; i++) {                                  \
        sign = tp##_is_neg(xb.fld);                               \
        if (tp##_is_any_nan(xb.fld)) {                            \
            match = extract32(dcmx, 6, 1);                        \
        } else if (tp##_is_infinity(xb.fld)) {                    \
            match = extract32(dcmx, 4 + !sign, 1);                \
        } else if (tp##_is_zero(xb.fld)) {                        \
            match = extract32(dcmx, 2 + !sign, 1);                \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {            \
            match = extract32(dcmx, 0 + !sign, 1);                \
        }                                                         \
                                                                  \
        if (scrf) {                                               \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;        \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);                  \
            env->fpscr |= cc << FPSCR_FPRF;                       \
            env->crf[BF(opcode)] = cc;                            \
        } else {                                                  \
            xt.tfld = match ? fld_max : 0;                        \
        }                                                         \
        match = 0;                                                \
    }                                                             \
    if (!scrf) {                                                  \
        putVSR(xT(opcode), &xt, env);                             \
    }                                                             \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)

void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
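/* DCMX bit assignments, as decoded by the data-class tests above
 * (bit 6 is the most significant of the 7-bit mask):
 *   bit 6 - any NaN          bit 5 - +Infinity   bit 4 - -Infinity
 *   bit 3 - +Zero            bit 2 - -Zero
 *   bit 1 - +Denormal        bit 0 - -Denormal
 * xststdcsp additionally reports, in the SO bit, whether the operand
 * is not exactly representable in single precision.
 */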