xref: /openbmc/qemu/target/ppc/fpu_helper.c (revision de799beb)
1 /*
2  *  PowerPC floating point and SPE emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "exec/helper-proto.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 #include "fpu/softfloat.h"
25 
26 static inline float128 float128_snan_to_qnan(float128 x)
27 {
28     float128 r;
29 
30     r.high = x.high | 0x0000800000000000;
31     r.low = x.low;
32     return r;
33 }
34 
/* Quiet an SNaN by setting the msb of the fraction field. */
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
38 
39 static inline float32 bfp32_neg(float32 a)
40 {
41     if (unlikely(float32_is_any_nan(a))) {
42         return a;
43     } else {
44         return float32_chs(a);
45     }
46 }
47 
/*
 * Whether MSR[FE0]/MSR[FE1] permit floating-point exception delivery.
 * User-mode emulation has no MSR, so exceptions are always enabled there.
 */
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
56 
57 /*****************************************************************************/
58 /* Floating point operations helpers */
59 
60 /*
 * This is the non-arithmetic conversion that happens e.g. on loads.
62  * In the Power ISA pseudocode, this is called DOUBLE.
63  */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;   /* strip the sign bit */
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NAN.  */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /* Normalized operand.  */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            /*
             * Rebias the exponent: bits 61:59 of the double are the
             * complement of the single's exponent msb, replicated.
             */
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            ret |= (uint64_t)exp << 52;
            /* '+' (not '|') so the implicit bit carries into the exponent. */
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}
103 
104 /*
 * This is the non-arithmetic conversion that happens e.g. on stores.
106  * In the Power ISA pseudocode, this is called SINGLE.
107  */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);   /* biased double-precision exponent */
    uint32_t ret;

    /* 896 = 1023 - 127: double exponents above this map to single normals. */
    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result.  Shift in the implicit bit explicitly. */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
133 
134 static inline int ppc_float32_get_unbiased_exp(float32 f)
135 {
136     return ((f >> 23) & 0xFF) - 127;
137 }
138 
139 static inline int ppc_float64_get_unbiased_exp(float64 f)
140 {
141     return ((f >> 52) & 0x7FF) - 1023;
142 }
143 
144 /* Classify a floating-point number.  */
enum {
    is_normal   = 1,    /* normalized, non-zero */
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,   /* quiet NaN */
    is_snan     = 32,   /* signalling NaN */
    is_neg      = 64,   /* sign bit set; may combine with any of the above */
};
154 
/*
 * Generate tp##_classify(): returns exactly one of the class bits above,
 * OR'ed with is_neg when the sign bit is set.
 */
#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

/* Instantiate classifiers for each softfloat width used below. */
COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
179 
/*
 * Translate a classify() result into the 5-bit FPSCR.FPRF code.
 * Rows are indexed by the lowest set class bit (ctz32), columns by sign.
 */
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~FP_FPRF;
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
195 
/* Generate helper_compute_fprf_<tp>(): classify arg and store FPSCR.FPRF. */
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
206 
207 /* Floating-point invalid operations exception */
/*
 * Common tail for invalid-operation exceptions: set the VX and FX
 * summary bits, then raise a program interrupt if VE is set and the
 * MSR FE bits permit delivery.
 */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_VE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}
223 
/*
 * Invalid-operation tail for arithmetic ops: clear FR/FI, and when the
 * exception is not enabled (VE clear) optionally set FPCC to "unordered"
 * before taking the common exit.
 */
static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}
236 
237 /* Signalling NaN */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    /* Signalling NaN operand: set VXSNAN and take the common exit. */
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}
243 
244 /* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    /* Inf - Inf: set VXISI and take the arithmetic invalid-op exit. */
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}
251 
252 /* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    /* Inf / Inf: set VXIDI and take the arithmetic invalid-op exit. */
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}
259 
260 /* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    /* 0 / 0: set VXZDZ and take the arithmetic invalid-op exit. */
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}
267 
268 /* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    /* Inf * 0: set VXIMZ and take the arithmetic invalid-op exit. */
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}
275 
276 /* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    /* sqrt of a negative operand: set VXSQRT and take the common exit. */
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
283 
284 /* Ordered comparison of NaN */
/*
 * Ordered compare with a NaN operand.  Unlike the other invalid ops,
 * the exception is only queued (deferred) here because the target CR
 * field must be updated before the interrupt is actually taken.
 */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);   /* result class: unordered */
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (env->fpscr & FP_VE) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}
308 
309 /* Invalid conversion */
/*
 * Invalid conversion: set VXCVI, clear FR/FI, optionally mark the FPCC
 * as unordered when the exception is disabled, then take the common exit.
 */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXCVI;
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}
323 
/*
 * Zero-divide exception: set ZX/FX, clear FR/FI, and raise a program
 * interrupt when ZE is enabled and the MSR FE bits permit delivery.
 */
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_ZE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
340 
/*
 * Overflow exception: set OX/FX and, when OE is enabled, queue a
 * deferred program interrupt.  Returns float_flag_inexact when OE is
 * disabled (overflow then also implies inexact for the caller to
 * process); returns 0 when the interrupt was queued.
 */
static inline int float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;

    bool overflow_enabled = !!(env->fpscr & FP_OE);
    if (overflow_enabled) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    }

    return overflow_enabled ? 0 : float_flag_inexact;
}
361 
/*
 * Underflow exception: set UX/FX and, when UE is enabled, queue a
 * deferred program interrupt (delivered by do_float_check_status).
 */
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_UE) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
378 
/*
 * Inexact exception: set XX/FX and, when XE is enabled, queue a
 * deferred program interrupt (delivered by do_float_check_status).
 */
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_XE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
394 
395 void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
396 {
397     uint32_t mask = 1u << bit;
398     if (env->fpscr & mask) {
399         ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
400     }
401 }
402 
403 void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
404 {
405     uint32_t mask = 1u << bit;
406     if (!(env->fpscr & mask)) {
407         ppc_store_fpscr(env, env->fpscr | mask);
408     }
409 }
410 
411 void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
412 {
413     target_ulong mask = 0;
414     int i;
415 
416     /* TODO: push this extension back to translation time */
417     for (i = 0; i < sizeof(target_ulong) * 2; i++) {
418         if (nibbles & (1 << i)) {
419             mask |= (target_ulong) 0xf << (4 * i);
420         }
421     }
422     val = (val & mask) | (env->fpscr & ~mask);
423     ppc_store_fpscr(env, val);
424 }
425 
/*
 * Raise a deferred program interrupt if any FPSCR exception bit is both
 * set and enabled.  The cause priority (OX, UX, XX, ZX, then the VX
 * sub-causes in architected order) is encoded by the if/else chain.
 */
static void do_fpscr_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    target_ulong fpscr = env->fpscr;
    int error = 0;

    if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
        error = POWERPC_EXCP_FP_OX;
    } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
        error = POWERPC_EXCP_FP_UX;
    } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
        error = POWERPC_EXCP_FP_XX;
    } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
        error = POWERPC_EXCP_FP_ZX;
    } else if (fpscr & FP_VE) {
        if (fpscr & FP_VXSOFT) {
            error = POWERPC_EXCP_FP_VXSOFT;
        } else if (fpscr & FP_VXSNAN) {
            error = POWERPC_EXCP_FP_VXSNAN;
        } else if (fpscr & FP_VXISI) {
            error = POWERPC_EXCP_FP_VXISI;
        } else if (fpscr & FP_VXIDI) {
            error = POWERPC_EXCP_FP_VXIDI;
        } else if (fpscr & FP_VXZDZ) {
            error = POWERPC_EXCP_FP_VXZDZ;
        } else if (fpscr & FP_VXIMZ) {
            error = POWERPC_EXCP_FP_VXIMZ;
        } else if (fpscr & FP_VXVC) {
            error = POWERPC_EXCP_FP_VXVC;
        } else if (fpscr & FP_VXSQRT) {
            error = POWERPC_EXCP_FP_VXSQRT;
        } else if (fpscr & FP_VXCVI) {
            error = POWERPC_EXCP_FP_VXCVI;
        } else {
            return;   /* VE set but no invalid-operation cause pending */
        }
    } else {
        return;   /* nothing both set and enabled: no exception */
    }
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = error | POWERPC_EXCP_FP;
    env->fpscr |= error ? FP_FEX : 0;
    /* Deferred floating-point exception after target FPSCR update */
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, raddr);
    }
}
474 
/* Helper entry point; GETPC() must be taken here, in the outermost helper. */
void helper_fpscr_check_status(CPUPPCState *env)
{
    do_fpscr_check_status(env, GETPC());
}
479 
/*
 * Fold the accrued softfloat exception flags into FPSCR and deliver any
 * deferred program interrupt.  change_fi selects whether FPSCR.FI is
 * updated from the inexact flag.
 */
static void do_float_check_status(CPUPPCState *env, bool change_fi,
                                  uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        /* May fold float_flag_inexact back into status when OE is off. */
        status |= float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }
    if (change_fi) {
        env->fpscr = FIELD_DP64(env->fpscr, FPSCR, FI,
                                !!(status & float_flag_inexact));
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}
508 
/* Helper entry point; GETPC() must be taken here, in the outermost helper. */
void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, true, GETPC());
}
513 
/* Clear the accrued softfloat exception flags before a new FP op. */
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
518 
519 static void float_invalid_op_addsub(CPUPPCState *env, int flags,
520                                     bool set_fpcc, uintptr_t retaddr)
521 {
522     if (flags & float_flag_invalid_isi) {
523         float_invalid_op_vxisi(env, set_fpcc, retaddr);
524     } else if (flags & float_flag_invalid_snan) {
525         float_invalid_op_vxsnan(env, retaddr);
526     }
527 }
528 
529 /* fadd - fadd. */
530 float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
531 {
532     float64 ret = float64_add(arg1, arg2, &env->fp_status);
533     int flags = get_float_exception_flags(&env->fp_status);
534 
535     if (unlikely(flags & float_flag_invalid)) {
536         float_invalid_op_addsub(env, flags, 1, GETPC());
537     }
538 
539     return ret;
540 }
541 
542 /* fadds - fadds. */
543 float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2)
544 {
545     float64 ret = float64r32_add(arg1, arg2, &env->fp_status);
546     int flags = get_float_exception_flags(&env->fp_status);
547 
548     if (unlikely(flags & float_flag_invalid)) {
549         float_invalid_op_addsub(env, flags, 1, GETPC());
550     }
551     return ret;
552 }
553 
554 /* fsub - fsub. */
555 float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
556 {
557     float64 ret = float64_sub(arg1, arg2, &env->fp_status);
558     int flags = get_float_exception_flags(&env->fp_status);
559 
560     if (unlikely(flags & float_flag_invalid)) {
561         float_invalid_op_addsub(env, flags, 1, GETPC());
562     }
563 
564     return ret;
565 }
566 
567 /* fsubs - fsubs. */
568 float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2)
569 {
570     float64 ret = float64r32_sub(arg1, arg2, &env->fp_status);
571     int flags = get_float_exception_flags(&env->fp_status);
572 
573     if (unlikely(flags & float_flag_invalid)) {
574         float_invalid_op_addsub(env, flags, 1, GETPC());
575     }
576     return ret;
577 }
578 
579 static void float_invalid_op_mul(CPUPPCState *env, int flags,
580                                  bool set_fprc, uintptr_t retaddr)
581 {
582     if (flags & float_flag_invalid_imz) {
583         float_invalid_op_vximz(env, set_fprc, retaddr);
584     } else if (flags & float_flag_invalid_snan) {
585         float_invalid_op_vxsnan(env, retaddr);
586     }
587 }
588 
589 /* fmul - fmul. */
590 float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
591 {
592     float64 ret = float64_mul(arg1, arg2, &env->fp_status);
593     int flags = get_float_exception_flags(&env->fp_status);
594 
595     if (unlikely(flags & float_flag_invalid)) {
596         float_invalid_op_mul(env, flags, 1, GETPC());
597     }
598 
599     return ret;
600 }
601 
602 /* fmuls - fmuls. */
603 float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2)
604 {
605     float64 ret = float64r32_mul(arg1, arg2, &env->fp_status);
606     int flags = get_float_exception_flags(&env->fp_status);
607 
608     if (unlikely(flags & float_flag_invalid)) {
609         float_invalid_op_mul(env, flags, 1, GETPC());
610     }
611     return ret;
612 }
613 
614 static void float_invalid_op_div(CPUPPCState *env, int flags,
615                                  bool set_fprc, uintptr_t retaddr)
616 {
617     if (flags & float_flag_invalid_idi) {
618         float_invalid_op_vxidi(env, set_fprc, retaddr);
619     } else if (flags & float_flag_invalid_zdz) {
620         float_invalid_op_vxzdz(env, set_fprc, retaddr);
621     } else if (flags & float_flag_invalid_snan) {
622         float_invalid_op_vxsnan(env, retaddr);
623     }
624 }
625 
626 /* fdiv - fdiv. */
627 float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
628 {
629     float64 ret = float64_div(arg1, arg2, &env->fp_status);
630     int flags = get_float_exception_flags(&env->fp_status);
631 
632     if (unlikely(flags & float_flag_invalid)) {
633         float_invalid_op_div(env, flags, 1, GETPC());
634     }
635     if (unlikely(flags & float_flag_divbyzero)) {
636         float_zero_divide_excp(env, GETPC());
637     }
638 
639     return ret;
640 }
641 
642 /* fdivs - fdivs. */
643 float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2)
644 {
645     float64 ret = float64r32_div(arg1, arg2, &env->fp_status);
646     int flags = get_float_exception_flags(&env->fp_status);
647 
648     if (unlikely(flags & float_flag_invalid)) {
649         float_invalid_op_div(env, flags, 1, GETPC());
650     }
651     if (unlikely(flags & float_flag_divbyzero)) {
652         float_zero_divide_excp(env, GETPC());
653     }
654 
655     return ret;
656 }
657 
/*
 * Handle an invalid float-to-integer conversion.  Returns the value to
 * store: the converter's saturated result for an out-of-range input
 * (invalid_cvti), or ret_nan for a NaN source.
 */
static uint64_t float_invalid_cvt(CPUPPCState *env, int flags,
                                  uint64_t ret, uint64_t ret_nan,
                                  bool set_fprc, uintptr_t retaddr)
{
    /*
     * VXCVI is different from most in that it sets two exception bits,
     * VXCVI and VXSNAN for an SNaN input.
     */
    if (flags & float_flag_invalid_snan) {
        env->fpscr |= FP_VXSNAN;
    }
    float_invalid_op_vxcvi(env, set_fprc, retaddr);

    return flags & float_flag_invalid_cvti ? ret : ret_nan;
}
673 
/*
 * Generate the float-to-integer conversion helpers (fctiw/fctid family).
 * 'cvt' names the float64_to_* softfloat routine; 'nanval' is the value
 * substituted when the source is a NaN.
 */
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int flags = get_float_exception_flags(&env->fp_status);            \
    if (unlikely(flags & float_flag_invalid)) {                        \
        ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC());  \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
693 
/*
 * Generate the integer-to-float conversion helpers (fcfid family).
 * When is_single, convert to float32 first and then widen, so the
 * result is correctly rounded to single precision.
 */
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, true, GETPC());             \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
713 
/*
 * Round a float64 to an integral value using the given rounding mode
 * (fri* family), restoring the previous rounding mode afterwards.
 * fri* reports VXSNAN but never FPSCR[XX], so the inexact flag is
 * stripped before the common status check.
 */
static uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                       FloatRoundMode rounding_mode)
{
    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
    int flags;

    set_float_rounding_mode(rounding_mode, &env->fp_status);
    arg = float64_round_to_int(arg, &env->fp_status);
    set_float_rounding_mode(old_rounding_mode, &env->fp_status);

    flags = get_float_exception_flags(&env->fp_status);
    if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* fri* does not set FPSCR[XX] */
    set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status);
    do_float_check_status(env, true, GETPC());

    return arg;
}
735 
/* frin: round to nearest, ties away from zero. */
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}
740 
/* friz: round toward zero (truncate). */
uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}
745 
/* frip: round toward +infinity (ceiling). */
uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}
750 
/* frim: round toward -infinity (floor). */
uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
755 
756 static void float_invalid_op_madd(CPUPPCState *env, int flags,
757                                   bool set_fpcc, uintptr_t retaddr)
758 {
759     if (flags & float_flag_invalid_imz) {
760         float_invalid_op_vximz(env, set_fpcc, retaddr);
761     } else {
762         float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
763     }
764 }
765 
766 static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
767                          float64 c, int madd_flags, uintptr_t retaddr)
768 {
769     float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
770     int flags = get_float_exception_flags(&env->fp_status);
771 
772     if (unlikely(flags & float_flag_invalid)) {
773         float_invalid_op_madd(env, flags, 1, retaddr);
774     }
775     return ret;
776 }
777 
778 static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b,
779                           float64 c, int madd_flags, uintptr_t retaddr)
780 {
781     float64 ret = float64r32_muladd(a, b, c, madd_flags, &env->fp_status);
782     int flags = get_float_exception_flags(&env->fp_status);
783 
784     if (unlikely(flags & float_flag_invalid)) {
785         float_invalid_op_madd(env, flags, 1, retaddr);
786     }
787     return ret;
788 }
789 
/*
 * Generate the double- and single-precision fused multiply-add helpers.
 * The *_FLGS masks select softfloat muladd negation of the addend and/or
 * of the final result to build msub/nmadd/nmsub from the same core.
 */
#define FPU_FMADD(op, madd_flags)                                    \
    uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \
    uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1,         \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); }

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
807 
808 /* frsp - frsp. */
809 static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr)
810 {
811     float32 f32 = float64_to_float32(arg, &env->fp_status);
812     int flags = get_float_exception_flags(&env->fp_status);
813 
814     if (unlikely(flags & float_flag_invalid_snan)) {
815         float_invalid_op_vxsnan(env, retaddr);
816     }
817     return helper_todouble(f32);
818 }
819 
/* Helper entry point; GETPC() must be taken here, in the outermost helper. */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    return do_frsp(env, arg, GETPC());
}
824 
825 static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
826                                   bool set_fpcc, uintptr_t retaddr)
827 {
828     if (unlikely(flags & float_flag_invalid_sqrt)) {
829         float_invalid_op_vxsqrt(env, set_fpcc, retaddr);
830     } else if (unlikely(flags & float_flag_invalid_snan)) {
831         float_invalid_op_vxsnan(env, retaddr);
832     }
833 }
834 
835 /* fsqrt - fsqrt. */
836 float64 helper_fsqrt(CPUPPCState *env, float64 arg)
837 {
838     float64 ret = float64_sqrt(arg, &env->fp_status);
839     int flags = get_float_exception_flags(&env->fp_status);
840 
841     if (unlikely(flags & float_flag_invalid)) {
842         float_invalid_op_sqrt(env, flags, 1, GETPC());
843     }
844 
845     return ret;
846 }
847 
848 /* fsqrts - fsqrts. */
849 float64 helper_fsqrts(CPUPPCState *env, float64 arg)
850 {
851     float64 ret = float64r32_sqrt(arg, &env->fp_status);
852     int flags = get_float_exception_flags(&env->fp_status);
853 
854     if (unlikely(flags & float_flag_invalid)) {
855         float_invalid_op_sqrt(env, flags, 1, GETPC());
856     }
857     return ret;
858 }
859 
860 /* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}
878 
879 /* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64r32_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}
897 
/* frsqrte  - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        /* Negative operand or sNaN in the sqrt step: VXSQRT/VXSNAN. */
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}
916 
/* frsqrtes  - frsqrtes. */
float64 helper_frsqrtes(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    /* float64r32_div rounds so the result is single-precision representable. */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64r32_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        /* Negative operand or sNaN in the sqrt step: VXSQRT/VXSNAN. */
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}
935 
/*
 * fsel - fsel.
 * FRT = (FRA >= 0.0) ? FRC : FRB, operating directly on the raw
 * float64 bit patterns: select c when a is non-negative (either sign
 * bit clear, or a is a zero of either sign) and not a NaN; otherwise
 * select b.
 */
uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c)
{
    bool a_is_nan = (a & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL &&
                    (a & 0x000fffffffffffffULL) != 0;
    bool a_ge_zero = !(a >> 63) || (a & 0x7fffffffffffffffULL) == 0;

    return (a_ge_zero && !a_is_nan) ? c : b;
}
950 
/*
 * ftdiv: test whether a software divide fra/frb needs the full path.
 * Computes the FE ("fail estimate") and FG ("fail guard") test bits
 * from the operands without performing the division.
 */
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        /* Exponent ranges for which the quotient may over/underflow
         * or lose precision. */
        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    /* CR nibble: bit 3 always set, FG in bit 2, FE in bit 1. */
    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
986 
987 uint32_t helper_ftsqrt(uint64_t frb)
988 {
989     int fe_flag = 0;
990     int fg_flag = 0;
991 
992     if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
993         fe_flag = 1;
994         fg_flag = 1;
995     } else {
996         int e_b = ppc_float64_get_unbiased_exp(frb);
997 
998         if (unlikely(float64_is_any_nan(frb))) {
999             fe_flag = 1;
1000         } else if (unlikely(float64_is_zero(frb))) {
1001             fe_flag = 1;
1002         } else if (unlikely(float64_is_neg(frb))) {
1003             fe_flag = 1;
1004         } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
1005             fe_flag = 1;
1006         }
1007 
1008         if (unlikely(float64_is_zero_or_denormal(frb))) {
1009             /* XB is not zero because of the above check and */
1010             /* therefore must be denormalized.               */
1011             fg_flag = 1;
1012         }
1013     }
1014 
1015     return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1016 }
1017 
/*
 * fcmpu: unordered floating-point compare.
 * Writes the 4-bit result (0x8 = lt, 0x4 = gt, 0x2 = eq,
 * 0x1 = unordered/NaN) to both FPSCR.FPCC and CR field crfD, and
 * raises VXSNAN if either operand is a signaling NaN.
 */
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}
1048 
/*
 * fcmpo: ordered floating-point compare.
 * Like fcmpu, but any NaN operand additionally raises VXVC (and VXSNAN
 * as well when the NaN is signaling).
 */
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = (uint32_t) ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
1081 
1082 /* Single-precision floating-point conversions */
1083 static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1084 {
1085     CPU_FloatU u;
1086 
1087     u.f = int32_to_float32(val, &env->vec_status);
1088 
1089     return u.l;
1090 }
1091 
1092 static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1093 {
1094     CPU_FloatU u;
1095 
1096     u.f = uint32_to_float32(val, &env->vec_status);
1097 
1098     return u.l;
1099 }
1100 
1101 static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1102 {
1103     CPU_FloatU u;
1104 
1105     u.l = val;
1106     /* NaN are not treated the same way IEEE 754 does */
1107     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1108         return 0;
1109     }
1110 
1111     return float32_to_int32(u.f, &env->vec_status);
1112 }
1113 
1114 static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1115 {
1116     CPU_FloatU u;
1117 
1118     u.l = val;
1119     /* NaN are not treated the same way IEEE 754 does */
1120     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1121         return 0;
1122     }
1123 
1124     return float32_to_uint32(u.f, &env->vec_status);
1125 }
1126 
1127 static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1128 {
1129     CPU_FloatU u;
1130 
1131     u.l = val;
1132     /* NaN are not treated the same way IEEE 754 does */
1133     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1134         return 0;
1135     }
1136 
1137     return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1138 }
1139 
1140 static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1141 {
1142     CPU_FloatU u;
1143 
1144     u.l = val;
1145     /* NaN are not treated the same way IEEE 754 does */
1146     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1147         return 0;
1148     }
1149 
1150     return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1151 }
1152 
1153 static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1154 {
1155     CPU_FloatU u;
1156     float32 tmp;
1157 
1158     u.f = int32_to_float32(val, &env->vec_status);
1159     tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1160     u.f = float32_div(u.f, tmp, &env->vec_status);
1161 
1162     return u.l;
1163 }
1164 
1165 static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1166 {
1167     CPU_FloatU u;
1168     float32 tmp;
1169 
1170     u.f = uint32_to_float32(val, &env->vec_status);
1171     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1172     u.f = float32_div(u.f, tmp, &env->vec_status);
1173 
1174     return u.l;
1175 }
1176 
1177 static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1178 {
1179     CPU_FloatU u;
1180     float32 tmp;
1181 
1182     u.l = val;
1183     /* NaN are not treated the same way IEEE 754 does */
1184     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1185         return 0;
1186     }
1187     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1188     u.f = float32_mul(u.f, tmp, &env->vec_status);
1189 
1190     return float32_to_int32(u.f, &env->vec_status);
1191 }
1192 
1193 static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1194 {
1195     CPU_FloatU u;
1196     float32 tmp;
1197 
1198     u.l = val;
1199     /* NaN are not treated the same way IEEE 754 does */
1200     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1201         return 0;
1202     }
1203     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1204     u.f = float32_mul(u.f, tmp, &env->vec_status);
1205 
1206     return float32_to_uint32(u.f, &env->vec_status);
1207 }
1208 
/* Expose the scalar 32-bit SPE conversions above as TCG helpers. */
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
1234 
/* Vector form: apply the conversion to each 32-bit half independently. */
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
1261 
1262 /* Single-precision floating-point arithmetic */
1263 static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1264 {
1265     CPU_FloatU u1, u2;
1266 
1267     u1.l = op1;
1268     u2.l = op2;
1269     u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1270     return u1.l;
1271 }
1272 
1273 static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1274 {
1275     CPU_FloatU u1, u2;
1276 
1277     u1.l = op1;
1278     u2.l = op2;
1279     u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1280     return u1.l;
1281 }
1282 
1283 static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1284 {
1285     CPU_FloatU u1, u2;
1286 
1287     u1.l = op1;
1288     u2.l = op2;
1289     u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1290     return u1.l;
1291 }
1292 
1293 static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1294 {
1295     CPU_FloatU u1, u2;
1296 
1297     u1.l = op1;
1298     u2.l = op2;
1299     u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1300     return u1.l;
1301 }
1302 
/* Expose the scalar SPE single-precision arithmetic ops as TCG helpers. */
#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
1316 
/* Vector form: apply the scalar op to each 32-bit half independently. */
#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
1331 
1332 /* Single-precision floating-point comparisons */
1333 static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1334 {
1335     CPU_FloatU u1, u2;
1336 
1337     u1.l = op1;
1338     u2.l = op2;
1339     return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1340 }
1341 
1342 static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1343 {
1344     CPU_FloatU u1, u2;
1345 
1346     u1.l = op1;
1347     u2.l = op2;
1348     return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1349 }
1350 
1351 static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1352 {
1353     CPU_FloatU u1, u2;
1354 
1355     u1.l = op1;
1356     u2.l = op2;
1357     return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1358 }
1359 
/* efststlt: "test" variant; currently identical to the compare. */
static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(env, op1, op2);
}
1365 
/* efststgt: "test" variant; currently identical to the compare. */
static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(env, op1, op2);
}
1371 
/* efststeq: "test" variant; currently identical to the compare. */
static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(env, op1, op2);
}
1377 
/* Expose the scalar SPE compares as TCG helpers (result is 4 or 0). */
#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
1395 
/*
 * Merge the per-half comparison results into the CR pattern
 * (t0 << 3) | (t1 << 2) | ((t0|t1) << 1) | (t0&t1).
 */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    int either = t0 | t1;
    int both = t0 & t1;

    return (t0 << 3) | (t1 << 2) | (either << 1) | both;
}
1400 
/* Vector compares: compare each half, then merge via evcmp_merge. */
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
1419 
1420 /* Double-precision floating-point conversion */
1421 uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1422 {
1423     CPU_DoubleU u;
1424 
1425     u.d = int32_to_float64(val, &env->vec_status);
1426 
1427     return u.ll;
1428 }
1429 
1430 uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1431 {
1432     CPU_DoubleU u;
1433 
1434     u.d = int64_to_float64(val, &env->vec_status);
1435 
1436     return u.ll;
1437 }
1438 
1439 uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1440 {
1441     CPU_DoubleU u;
1442 
1443     u.d = uint32_to_float64(val, &env->vec_status);
1444 
1445     return u.ll;
1446 }
1447 
1448 uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1449 {
1450     CPU_DoubleU u;
1451 
1452     u.d = uint64_to_float64(val, &env->vec_status);
1453 
1454     return u.ll;
1455 }
1456 
1457 uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1458 {
1459     CPU_DoubleU u;
1460 
1461     u.ll = val;
1462     /* NaN are not treated the same way IEEE 754 does */
1463     if (unlikely(float64_is_any_nan(u.d))) {
1464         return 0;
1465     }
1466 
1467     return float64_to_int32(u.d, &env->vec_status);
1468 }
1469 
1470 uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1471 {
1472     CPU_DoubleU u;
1473 
1474     u.ll = val;
1475     /* NaN are not treated the same way IEEE 754 does */
1476     if (unlikely(float64_is_any_nan(u.d))) {
1477         return 0;
1478     }
1479 
1480     return float64_to_uint32(u.d, &env->vec_status);
1481 }
1482 
1483 uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1484 {
1485     CPU_DoubleU u;
1486 
1487     u.ll = val;
1488     /* NaN are not treated the same way IEEE 754 does */
1489     if (unlikely(float64_is_any_nan(u.d))) {
1490         return 0;
1491     }
1492 
1493     return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1494 }
1495 
1496 uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1497 {
1498     CPU_DoubleU u;
1499 
1500     u.ll = val;
1501     /* NaN are not treated the same way IEEE 754 does */
1502     if (unlikely(float64_is_any_nan(u.d))) {
1503         return 0;
1504     }
1505 
1506     return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1507 }
1508 
1509 uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1510 {
1511     CPU_DoubleU u;
1512 
1513     u.ll = val;
1514     /* NaN are not treated the same way IEEE 754 does */
1515     if (unlikely(float64_is_any_nan(u.d))) {
1516         return 0;
1517     }
1518 
1519     return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1520 }
1521 
1522 uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1523 {
1524     CPU_DoubleU u;
1525 
1526     u.ll = val;
1527     /* NaN are not treated the same way IEEE 754 does */
1528     if (unlikely(float64_is_any_nan(u.d))) {
1529         return 0;
1530     }
1531 
1532     return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1533 }
1534 
1535 uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1536 {
1537     CPU_DoubleU u;
1538     float64 tmp;
1539 
1540     u.d = int32_to_float64(val, &env->vec_status);
1541     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1542     u.d = float64_div(u.d, tmp, &env->vec_status);
1543 
1544     return u.ll;
1545 }
1546 
1547 uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1548 {
1549     CPU_DoubleU u;
1550     float64 tmp;
1551 
1552     u.d = uint32_to_float64(val, &env->vec_status);
1553     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1554     u.d = float64_div(u.d, tmp, &env->vec_status);
1555 
1556     return u.ll;
1557 }
1558 
1559 uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1560 {
1561     CPU_DoubleU u;
1562     float64 tmp;
1563 
1564     u.ll = val;
1565     /* NaN are not treated the same way IEEE 754 does */
1566     if (unlikely(float64_is_any_nan(u.d))) {
1567         return 0;
1568     }
1569     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1570     u.d = float64_mul(u.d, tmp, &env->vec_status);
1571 
1572     return float64_to_int32(u.d, &env->vec_status);
1573 }
1574 
1575 uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1576 {
1577     CPU_DoubleU u;
1578     float64 tmp;
1579 
1580     u.ll = val;
1581     /* NaN are not treated the same way IEEE 754 does */
1582     if (unlikely(float64_is_any_nan(u.d))) {
1583         return 0;
1584     }
1585     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1586     u.d = float64_mul(u.d, tmp, &env->vec_status);
1587 
1588     return float64_to_uint32(u.d, &env->vec_status);
1589 }
1590 
1591 uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1592 {
1593     CPU_DoubleU u1;
1594     CPU_FloatU u2;
1595 
1596     u1.ll = val;
1597     u2.f = float64_to_float32(u1.d, &env->vec_status);
1598 
1599     return u2.l;
1600 }
1601 
1602 uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1603 {
1604     CPU_DoubleU u2;
1605     CPU_FloatU u1;
1606 
1607     u1.l = val;
1608     u2.d = float32_to_float64(u1.f, &env->vec_status);
1609 
1610     return u2.ll;
1611 }
1612 
/* Double precision floating-point arithmetic */
1614 uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1615 {
1616     CPU_DoubleU u1, u2;
1617 
1618     u1.ll = op1;
1619     u2.ll = op2;
1620     u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1621     return u1.ll;
1622 }
1623 
1624 uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1625 {
1626     CPU_DoubleU u1, u2;
1627 
1628     u1.ll = op1;
1629     u2.ll = op2;
1630     u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1631     return u1.ll;
1632 }
1633 
1634 uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1635 {
1636     CPU_DoubleU u1, u2;
1637 
1638     u1.ll = op1;
1639     u2.ll = op2;
1640     u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1641     return u1.ll;
1642 }
1643 
1644 uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1645 {
1646     CPU_DoubleU u1, u2;
1647 
1648     u1.ll = op1;
1649     u2.ll = op2;
1650     u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1651     return u1.ll;
1652 }
1653 
1654 /* Double precision floating point helpers */
1655 uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1656 {
1657     CPU_DoubleU u1, u2;
1658 
1659     u1.ll = op1;
1660     u2.ll = op2;
1661     return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1662 }
1663 
1664 uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1665 {
1666     CPU_DoubleU u1, u2;
1667 
1668     u1.ll = op1;
1669     u2.ll = op2;
1670     return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1671 }
1672 
1673 uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1674 {
1675     CPU_DoubleU u1, u2;
1676 
1677     u1.ll = op1;
1678     u2.ll = op2;
1679     return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1680 }
1681 
/* efdcmplt: currently identical to the "test" variant. */
uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(env, op1, op2);
}
1687 
/* efdcmpgt: currently identical to the "test" variant. */
uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(env, op1, op2);
}
1693 
/* efdcmpeq: currently identical to the "test" variant. */
uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(env, op1, op2);
}
1699 
/* Identity "conversion" (no-op; env unused) so macro-expanded code can
 * name a float64 -> float64 conversion uniformly. */
#define float64_to_float64(x, env) x
1701 
1702 
/*
 * VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the intermediate result to single precision (do_frsp)
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfifprf, r2sp)                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_addsub(env, tstat.float_exception_flags,        \
                                    sfifprf, GETPC());                       \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
1752 
/* xsaddqp[o]: quad-precision add. */
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    /* Use a scratch status so only this operation's flags accumulate. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        /* xsaddqpo: round the result to odd. */
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
1779 
1780 /*
1781  * VSX_MUL - VSX floating point multiply
1782  *   op    - instruction mnemonic
1783  *   nels  - number of elements (1, 2 or 4)
1784  *   tp    - type (float32 or float64)
1785  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1786  *   sfifprf - set FI and FPRF
1787  */
1788 #define VSX_MUL(op, nels, tp, fld, sfifprf, r2sp)                            \
1789 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
1790                  ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
1791 {                                                                            \
1792     ppc_vsr_t t = { };                                                       \
1793     int i;                                                                   \
1794                                                                              \
1795     helper_reset_fpstatus(env);                                              \
1796                                                                              \
1797     for (i = 0; i < nels; i++) {                                             \
1798         float_status tstat = env->fp_status;                                 \
1799         set_float_exception_flags(0, &tstat);                                \
1800         t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
1801         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1802                                                                              \
1803         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1804             float_invalid_op_mul(env, tstat.float_exception_flags,           \
1805                                  sfifprf, GETPC());                          \
1806         }                                                                    \
1807                                                                              \
1808         if (r2sp) {                                                          \
1809             t.fld = do_frsp(env, t.fld, GETPC());                            \
1810         }                                                                    \
1811                                                                              \
1812         if (sfifprf) {                                                       \
1813             helper_compute_fprf_float64(env, t.fld);                         \
1814         }                                                                    \
1815     }                                                                        \
1816                                                                              \
1817     *xt = t;                                                                 \
1818     do_float_check_status(env, sfifprf, GETPC());                            \
1819 }
1820 
/*
 * Multiply variants (see VSX_MUL above):
 *   xsmuldp - scalar double-precision, sets FI/FPRF
 *   xsmulsp - scalar, double-precision arithmetic rounded to single (r2sp)
 *   xvmuldp - vector of 2 double-precision elements
 *   xvmulsp - vector of 4 single-precision elements
 */
VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1825 
/*
 * xsmulqp[o] - VSX scalar quad-precision multiply [round to odd].
 * Computes xt = xa * xb in float128.  When Rc(opcode) is set (the
 * "o" form) the multiply uses round-to-odd; the mode is changed only
 * in the local float_status copy so env->fp_status keeps the guest's
 * configured rounding mode.
 */
void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        /* xsmulqpo: round-to-odd, applied to the scratch status only */
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
    /* Fold the flags raised by this operation back into the live status */
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
    }
    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
1850 
/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * Each element is divided using a scratch float_status so the flags
 * raised by this one operation can be examined in isolation; they are
 * then OR-ed back into env->fp_status.  Invalid-operation and
 * divide-by-zero receive their PowerPC FPSCR treatment before the
 * optional frsp rounding and FPRF classification.
 */
#define VSX_DIV(op, nels, tp, fld, sfifprf, r2sp)                             \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_div(env, tstat.float_exception_flags,            \
                                 sfifprf, GETPC());                           \
        }                                                                     \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
            float_zero_divide_excp(env, GETPC());                             \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}
1894 
/*
 * Divide variants: scalar DP, scalar rounded-to-SP (r2sp), and the
 * 2x64 / 4x32 vector forms.
 */
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1899 
/*
 * xsdivqp[o] - VSX scalar quad-precision divide [round to odd].
 * Computes xt = xa / xb in float128.  When Rc(opcode) is set (the
 * "o" form) the divide uses round-to-odd in the local float_status
 * copy, leaving the guest's rounding mode in env->fp_status untouched.
 * Invalid-operation and zero-divide conditions get their FPSCR
 * treatment before FPRF is computed for the result.
 */
void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        /* xsdivqpo: round-to-odd, applied to the scratch status only */
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_div(xa->f128, xb->f128, &tstat);
    /* Fold the flags raised by this operation back into the live status */
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
    }
    if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
1927 
/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * The "estimate" is implemented as an exact 1.0 / x divide, which is a
 * valid (maximally accurate) estimate.  A signaling-NaN input raises
 * VXSNAN before the divide is performed.
 */
#define VSX_RE(op, nels, tp, fld, sfifprf, r2sp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}
1962 
/* Reciprocal-estimate variants: scalar DP, scalar SP (r2sp), vector DP/SP */
VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
1967 
/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * Each element's sqrt runs in a scratch float_status; the raised flags
 * are OR-ed back into env->fp_status, and invalid operations (e.g.
 * sqrt of a negative number) get their FPSCR treatment.
 */
#define VSX_SQRT(op, nels, tp, fld, sfifprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}
2007 
/* Square-root variants: scalar DP, scalar SP (r2sp), vector DP/SP */
VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2012 
/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * Implemented exactly as 1.0 / sqrt(x); exception flags from both the
 * sqrt and the divide accumulate in the same scratch float_status
 * before being merged into env->fp_status.
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfifprf, r2sp)                         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}
2051 
/* Reciprocal-sqrt-estimate variants: scalar DP, scalar SP (r2sp), vector DP/SP */
VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2056 
/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 *
 * Writes CR field BF as 0b1000 | fg | fe, where fe/fg indicate that a
 * subsequent divide-estimate of xa/xb would hit exceptional operands
 * (NaN, infinity, zero divisor, denormal, exponent out of range) and
 * need software assistance.  No FP exception flags are raised here.
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
2109 
/* Test-for-divide: scalar DP and the vector DP/SP forms */
VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2113 
/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 *
 * Writes CR field BF as 0b1000 | fg | fe, where fe/fg indicate that a
 * subsequent sqrt-estimate of xb would hit exceptional operands (NaN,
 * infinity, zero, negative, denormal, exponent too small) and need
 * software assistance.  No FP exception flags are raised here.
 *
 * Note: the macro takes no emax parameter; only the lower exponent
 * bound matters for square root.
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            /*                                                          \
             * XB is neither zero nor infinity here (handled above),    \
             * so only NaN, sign and exponent range remain to test.     \
             */                                                         \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (e_b <= (emin + nbits)) {                         \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
2162 
/* Test-for-sqrt: scalar DP and the vector DP/SP forms */
VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2166 
/*
 * VSX_MADD - VSX floating point muliply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   sfifprf - set FI and FPRF
 *
 * Computes t = s1 * s3 + s2 per element (s2 is the addend) as a single
 * fused operation via tp##_muladd.  t starts as a copy of *xt because
 * the target register is also an input operand in the ISA forms.
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfifprf)                        \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3)                 \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat);     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_madd(env, tstat.float_exception_flags,           \
                                  sfifprf, GETPC());                          \
        }                                                                     \
                                                                              \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}
2204 
/* Scalar double-precision fused multiply-add family (sets FI/FPRF) */
VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
/* Scalar single-precision family: float64r32 rounds results to SP */
VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)

/* Vector 2x64 fused multiply-add family */
VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)

/* Vector 4x32 fused multiply-add family */
VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0)
2223 
/*
 * VSX_MADDQ - VSX floating point quad-precision muliply/add
 *   op    - instruction mnemonic
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   ro    - round to odd
 *
 * Computes t = s1 * s3 + s2 in float128 as one fused operation.  When
 * ro is set the rounding mode is forced to round-to-odd in the local
 * float_status copy only (the "QPO" instruction forms).
 */
#define VSX_MADDQ(op, maddflgs, ro)                                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\
                 ppc_vsr_t *s3)                                                \
{                                                                              \
    ppc_vsr_t t = *xt;                                                         \
                                                                               \
    helper_reset_fpstatus(env);                                                \
                                                                               \
    float_status tstat = env->fp_status;                                       \
    set_float_exception_flags(0, &tstat);                                      \
    if (ro) {                                                                  \
        tstat.float_rounding_mode = float_round_to_odd;                        \
    }                                                                          \
    t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat);  \
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;       \
                                                                               \
    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {          \
        float_invalid_op_madd(env, tstat.float_exception_flags,                \
                              false, GETPC());                                 \
    }                                                                          \
                                                                               \
    helper_compute_fprf_float128(env, t.f128);                                 \
    *xt = t;                                                                   \
    do_float_check_status(env, true, GETPC());                                 \
}
2256 
2257 VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
2258 VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
2259 VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
2260 VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
2261 VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
2262 VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
2263 VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
2264 VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 0)
2265 
/*
 * VSX_SCALAR_CMP - VSX scalar floating point compare
 *   op    - instruction mnemonic
 *   tp    - type
 *   cmp   - comparison operation
 *   fld   - vsr_t field
 *   svxvc - set VXVC bit
 *
 * Note the operand order: cmp is invoked as cmp(xb, xa), so "greater
 * or equal" / "greater than" are expressed via le/lt with the operands
 * swapped.  The boolean result is written into xt->fld as an all-ones
 * or all-zeros mask (via memset with -r); the rest of xt is zeroed.
 * For svxvc forms the signaling compare is used and an unordered
 * result sets VXVC (unless suppressed by an SNaN with VE set).
 */
#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc)                               \
        void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                ppc_vsr_t *xa, ppc_vsr_t *xb)                                 \
{                                                                             \
    int flags;                                                                \
    bool r, vxvc;                                                             \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    if (svxvc) {                                                              \
        r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status);                    \
    } else {                                                                  \
        r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status);            \
    }                                                                         \
                                                                              \
    flags = get_float_exception_flags(&env->fp_status);                       \
    if (unlikely(flags & float_flag_invalid)) {                               \
        vxvc = svxvc;                                                         \
        if (flags & float_flag_invalid_snan) {                                \
            float_invalid_op_vxsnan(env, GETPC());                            \
            vxvc &= !(env->fpscr & FP_VE);                                    \
        }                                                                     \
        if (vxvc) {                                                           \
            float_invalid_op_vxvc(env, 0, GETPC());                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    memset(xt, 0, sizeof(*xt));                                               \
    memset(&xt->fld, -r, sizeof(xt->fld));                                    \
    do_float_check_status(env, false, GETPC());                               \
}
2305 
/* GE/GT are built from le/lt because the macro swaps the operands */
VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1)
VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1)
2312 
2313 void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
2314                        ppc_vsr_t *xa, ppc_vsr_t *xb)
2315 {
2316     int64_t exp_a, exp_b;
2317     uint32_t cc;
2318 
2319     exp_a = extract64(xa->VsrD(0), 52, 11);
2320     exp_b = extract64(xb->VsrD(0), 52, 11);
2321 
2322     if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
2323                  float64_is_any_nan(xb->VsrD(0)))) {
2324         cc = CRF_SO;
2325     } else {
2326         if (exp_a < exp_b) {
2327             cc = CRF_LT;
2328         } else if (exp_a > exp_b) {
2329             cc = CRF_GT;
2330         } else {
2331             cc = CRF_EQ;
2332         }
2333     }
2334 
2335     env->fpscr &= ~FP_FPCC;
2336     env->fpscr |= cc << FPSCR_FPCC;
2337     env->crf[BF(opcode)] = cc;
2338 
2339     do_float_check_status(env, false, GETPC());
2340 }
2341 
/*
 * xscmpexpqp - compare the biased exponent fields of two QP values.
 * Writes the 4-bit condition (LT/GT/EQ, or SO when either operand is
 * a NaN) both to CR[BF] and to FPSCR.FPCC.  The 15-bit exponent lives
 * in bits 48..62 of the most-significant doubleword of the float128.
 */
void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
                       ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    int64_t exp_a, exp_b;
    uint32_t cc;

    exp_a = extract64(xa->VsrD(0), 48, 15);
    exp_b = extract64(xb->VsrD(0), 48, 15);

    if (unlikely(float128_is_any_nan(xa->f128) ||
                 float128_is_any_nan(xb->f128))) {
        /* Unordered: either operand is a NaN */
        cc = CRF_SO;
    } else {
        if (exp_a < exp_b) {
            cc = CRF_LT;
        } else if (exp_a > exp_b) {
            cc = CRF_GT;
        } else {
            cc = CRF_EQ;
        }
    }

    /* Mirror the result into FPSCR.FPCC and the target CR field */
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;

    do_float_check_status(env, false, GETPC());
}
2370 
2371 static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
2372                                  int crf_idx, bool ordered)
2373 {
2374     uint32_t cc;
2375     bool vxsnan_flag = false, vxvc_flag = false;
2376 
2377     helper_reset_fpstatus(env);
2378 
2379     switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
2380     case float_relation_less:
2381         cc = CRF_LT;
2382         break;
2383     case float_relation_equal:
2384         cc = CRF_EQ;
2385         break;
2386     case float_relation_greater:
2387         cc = CRF_GT;
2388         break;
2389     case float_relation_unordered:
2390         cc = CRF_SO;
2391 
2392         if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
2393             float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
2394             vxsnan_flag = true;
2395             if (!(env->fpscr & FP_VE) && ordered) {
2396                 vxvc_flag = true;
2397             }
2398         } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
2399                    float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
2400             if (ordered) {
2401                 vxvc_flag = true;
2402             }
2403         }
2404 
2405         break;
2406     default:
2407         g_assert_not_reached();
2408     }
2409 
2410     env->fpscr &= ~FP_FPCC;
2411     env->fpscr |= cc << FPSCR_FPCC;
2412     env->crf[crf_idx] = cc;
2413 
2414     if (vxsnan_flag) {
2415         float_invalid_op_vxsnan(env, GETPC());
2416     }
2417     if (vxvc_flag) {
2418         float_invalid_op_vxvc(env, 0, GETPC());
2419     }
2420 
2421     do_float_check_status(env, false, GETPC());
2422 }
2423 
/* xscmpodp: ordered DP compare (quiet-NaN operands also raise VXVC). */
void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), true);
}
2429 
/* xscmpudp: unordered DP compare (VXVC only per SNaN path in do_scalar_cmp). */
void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), false);
}
2435 
2436 static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
2437                                   ppc_vsr_t *xb, int crf_idx, bool ordered)
2438 {
2439     uint32_t cc;
2440     bool vxsnan_flag = false, vxvc_flag = false;
2441 
2442     helper_reset_fpstatus(env);
2443 
2444     switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
2445     case float_relation_less:
2446         cc = CRF_LT;
2447         break;
2448     case float_relation_equal:
2449         cc = CRF_EQ;
2450         break;
2451     case float_relation_greater:
2452         cc = CRF_GT;
2453         break;
2454     case float_relation_unordered:
2455         cc = CRF_SO;
2456 
2457         if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
2458             float128_is_signaling_nan(xb->f128, &env->fp_status)) {
2459             vxsnan_flag = true;
2460             if (!(env->fpscr & FP_VE) && ordered) {
2461                 vxvc_flag = true;
2462             }
2463         } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
2464                    float128_is_quiet_nan(xb->f128, &env->fp_status)) {
2465             if (ordered) {
2466                 vxvc_flag = true;
2467             }
2468         }
2469 
2470         break;
2471     default:
2472         g_assert_not_reached();
2473     }
2474 
2475     env->fpscr &= ~FP_FPCC;
2476     env->fpscr |= cc << FPSCR_FPCC;
2477     env->crf[crf_idx] = cc;
2478 
2479     if (vxsnan_flag) {
2480         float_invalid_op_vxsnan(env, GETPC());
2481     }
2482     if (vxvc_flag) {
2483         float_invalid_op_vxvc(env, 0, GETPC());
2484     }
2485 
2486     do_float_check_status(env, false, GETPC());
2487 }
2488 
/* xscmpoqp: ordered QP compare (quiet-NaN operands also raise VXVC). */
void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), true);
}
2494 
/* xscmpuqp: unordered QP compare (VXVC only per SNaN path in do_scalar_cmpq). */
void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), false);
}
2500 
/*
 * VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (max or min)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *
 * op expands to softfloat maxnum/minnum; an SNaN in either operand of
 * any element raises VXSNAN, but the per-element result is whatever
 * maxnum/minnum produced.
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                           \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, false, GETPC());                               \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2534 
/*
 * VSX_MAX_MINC - VSX "type C" maximum/minimum
 *   name - instruction mnemonic
 *   max  - true for maximum, false for minimum
 *   tp   - type (float64 or float128)
 *   fld  - vsr_t field (VsrD(0) or f128)
 *
 * Uses quiet compares, so an unordered result (NaN operand) selects xb;
 * VXSNAN is raised when the compare set the invalid-snan flag.
 */
#define VSX_MAX_MINC(name, max, tp, fld)                                      \
void helper_##name(CPUPPCState *env,                                          \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    bool first;                                                               \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    if (max) {                                                                \
        first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status);             \
    } else {                                                                  \
        first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status);             \
    }                                                                         \
                                                                              \
    if (first) {                                                              \
        t.fld = xa->fld;                                                      \
    } else {                                                                  \
        t.fld = xb->fld;                                                      \
        if (env->fp_status.float_exception_flags & float_flag_invalid_snan) { \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
}

VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
VSX_MAX_MINC(XSMAXCQP, true, float128, f128);
VSX_MAX_MINC(XSMINCQP, false, float128, f128);
2566 
2567 #define VSX_MAX_MINJ(name, max)                                               \
2568 void helper_##name(CPUPPCState *env,                                          \
2569                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
2570 {                                                                             \
2571     ppc_vsr_t t = { };                                                        \
2572     bool vxsnan_flag = false, vex_flag = false;                               \
2573                                                                               \
2574     if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
2575         if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
2576             vxsnan_flag = true;                                               \
2577         }                                                                     \
2578         t.VsrD(0) = xa->VsrD(0);                                              \
2579     } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
2580         if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
2581             vxsnan_flag = true;                                               \
2582         }                                                                     \
2583         t.VsrD(0) = xb->VsrD(0);                                              \
2584     } else if (float64_is_zero(xa->VsrD(0)) &&                                \
2585                float64_is_zero(xb->VsrD(0))) {                                \
2586         if (max) {                                                            \
2587             if (!float64_is_neg(xa->VsrD(0)) ||                               \
2588                 !float64_is_neg(xb->VsrD(0))) {                               \
2589                 t.VsrD(0) = 0ULL;                                             \
2590             } else {                                                          \
2591                 t.VsrD(0) = 0x8000000000000000ULL;                            \
2592             }                                                                 \
2593         } else {                                                              \
2594             if (float64_is_neg(xa->VsrD(0)) ||                                \
2595                 float64_is_neg(xb->VsrD(0))) {                                \
2596                 t.VsrD(0) = 0x8000000000000000ULL;                            \
2597             } else {                                                          \
2598                 t.VsrD(0) = 0ULL;                                             \
2599             }                                                                 \
2600         }                                                                     \
2601     } else if ((max &&                                                        \
2602                !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
2603                (!max &&                                                       \
2604                float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
2605         t.VsrD(0) = xa->VsrD(0);                                              \
2606     } else {                                                                  \
2607         t.VsrD(0) = xb->VsrD(0);                                              \
2608     }                                                                         \
2609                                                                               \
2610     vex_flag = (env->fpscr & FP_VE) && vxsnan_flag;                           \
2611     if (vxsnan_flag) {                                                        \
2612         float_invalid_op_vxsnan(env, GETPC());                                \
2613     }                                                                         \
2614     if (!vex_flag) {                                                          \
2615         *xt = t;                                                              \
2616     }                                                                         \
2617 }                                                                             \
2618 
2619 VSX_MAX_MINJ(XSMAXJDP, 1);
2620 VSX_MAX_MINJ(XSMINJDP, 0);
2621 
/*
 * VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 *
 * Each lane is set to all-ones on a true compare, zero otherwise; NaN
 * lanes count as false (and raise VXSNAN/VXVC as configured).  Returns
 * a CR6-style value: 0x8 when all lanes were true, 0x2 when all false.
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    uint32_t crf6 = 0;                                                    \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
                     tp##_is_any_nan(xb->fld))) {                         \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            t.fld = 0;                                                    \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
                t.fld = -1;                                               \
                all_false = 0;                                            \
            } else {                                                      \
                t.fld = 0;                                                \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
    return crf6;                                                          \
}

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
2678 
/*
 * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfifprf - set FI and FPRF
 *
 * An SNaN source raises VXSNAN and the converted result is re-quieted
 * with ttp##_snan_to_qnan.
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf)  \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfifprf) {                                             \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, sfifprf, GETPC());                  \
}

VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2713 
/*
 * VSX_CVT_FP_TO_FP2 - VSX floating point/floating point conversion
 * where each converted value is written to both words of its target
 * doubleword (t.VsrW(2*i) and t.VsrW(2*i+1)).
 *   op    - instruction mnemonic
 *   nels  - number of elements (1 or 2)
 *   stp   - source type
 *   ttp   - target type
 *   sfifprf - set FI and FPRF
 */
#define VSX_CVT_FP_TO_FP2(op, nels, stp, ttp, sfifprf)                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)      \
{                                                                     \
    ppc_vsr_t t = { };                                                \
    int i;                                                            \
                                                                      \
    for (i = 0; i < nels; i++) {                                      \
        t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb->VsrD(i),              \
                                            &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                    \
            t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i));        \
        }                                                             \
        if (sfifprf) {                                                \
            helper_compute_fprf_##ttp(env, t.VsrW(2 * i));            \
        }                                                             \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                            \
    }                                                                 \
                                                                      \
    *xt = t;                                                          \
    do_float_check_status(env, sfifprf, GETPC());                     \
}

VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0)
VSX_CVT_FP_TO_FP2(xscvdpsp, 1, float64, float32, 1)
2739 
/*
 * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF
 *
 * Unlike VSX_CVT_FP_TO_FP, t starts as a copy of *xt (untouched fields
 * are preserved) and do_float_check_status is always called with true.
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                   \
                                            &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                      \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_##ttp(env, t.tfld);                     \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, true, GETPC());                          \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2774 
/*
 * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 *
 * The extra '1' conversion argument selects IEEE half-precision
 * (softfloat float16 conversions take an 'ieee' flag).
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfifprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfifprf) {                                             \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, sfifprf, GETPC());                  \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i  + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2812 
2813 void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
2814 {
2815     ppc_vsr_t t = { };
2816     int i, status;
2817 
2818     helper_reset_fpstatus(env);
2819 
2820     for (i = 0; i < 4; i++) {
2821         t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status);
2822     }
2823 
2824     status = get_float_exception_flags(&env->fp_status);
2825     if (unlikely(status & float_flag_invalid_snan)) {
2826         float_invalid_op_vxsnan(env, GETPC());
2827     }
2828 
2829     *xt = t;
2830     do_float_check_status(env, false, GETPC());
2831 }
2832 
2833 void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
2834                      ppc_vsr_t *xb)
2835 {
2836     ppc_vsr_t t = { };
2837     float_status tstat;
2838 
2839     tstat = env->fp_status;
2840     if (ro != 0) {
2841         tstat.float_rounding_mode = float_round_to_odd;
2842     }
2843 
2844     t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
2845     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2846     if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
2847         float_invalid_op_vxsnan(env, GETPC());
2848         t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
2849     }
2850     helper_compute_fprf_float64(env, t.VsrD(0));
2851 
2852     *xt = t;
2853     do_float_check_status(env, true, GETPC());
2854 }
2855 
2856 uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2857 {
2858     uint64_t result, sign, exp, frac;
2859 
2860     float_status tstat = env->fp_status;
2861     set_float_exception_flags(0, &tstat);
2862 
2863     sign = extract64(xb, 63,  1);
2864     exp  = extract64(xb, 52, 11);
2865     frac = extract64(xb,  0, 52) | 0x10000000000000ULL;
2866 
2867     if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
2868         /* DP denormal operand.  */
2869         /* Exponent override to DP min exp.  */
2870         exp = 1;
2871         /* Implicit bit override to 0.  */
2872         frac = deposit64(frac, 53, 1, 0);
2873     }
2874 
2875     if (unlikely(exp < 897 && frac != 0)) {
2876         /* SP tiny operand.  */
2877         if (897 - exp > 63) {
2878             frac = 0;
2879         } else {
2880             /* Denormalize until exp = SP min exp.  */
2881             frac >>= (897 - exp);
2882         }
2883         /* Exponent override to SP min exp - 1.  */
2884         exp = 896;
2885     }
2886 
2887     result = sign << 31;
2888     result |= extract64(exp, 10, 1) << 30;
2889     result |= extract64(exp, 0, 7) << 23;
2890     result |= extract64(frac, 29, 23);
2891 
2892     /* hardware replicates result to both words of the doubleword result.  */
2893     return (result << 32) | result;
2894 }
2895 
/*
 * XSCVSPDPN: non-signalling SP->DP conversion; the SP value is held in
 * the high word of the source doubleword.
 */
uint64_t helper_XSCVSPDPN(uint64_t xb)
{
    return helper_todouble(xb >> 32);
}
2900 
/*
 * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfi   - set FI
 *   rnan  - resulting NaN
 *
 * Converts with round-to-zero; on an invalid conversion the element is
 * replaced via float_invalid_cvt (rnan for NaN sources).  Per-element
 * exception flags are accumulated across the loop into all_flags.
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan)         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, sfi, GETPC());                                \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
                  0ULL)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), false, \
                  0x80000000ULL)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
                  false, 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U)
2948 
/*
 * VSX_CVT_FP_TO_INT128 - VSX quad-precision to 128-bit integer conversion
 *   op   - instruction mnemonic
 *   tp   - target integer type (int128 or uint128)
 *   rnan - high-doubleword result substituted on an invalid conversion
 *          (via float_invalid_cvt); the low doubleword is then derived
 *          from bit 0 of the high result (all-ones when odd).
 */
#define VSX_CVT_FP_TO_INT128(op, tp, rnan)                                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)               \
{                                                                              \
    ppc_vsr_t t;                                                               \
    int flags;                                                                 \
                                                                               \
    helper_reset_fpstatus(env);                                                \
    t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status);      \
    flags = get_float_exception_flags(&env->fp_status);                        \
    if (unlikely(flags & float_flag_invalid)) {                                \
        t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
        t.VsrD(1) = -(t.VsrD(0) & 1);                                          \
    }                                                                          \
                                                                               \
    *xt = t;                                                                   \
    do_float_check_status(env, true, GETPC());                                 \
}

VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0)
VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL);
2969 
2970 /*
2971  * Likewise, except that the result is duplicated into both subwords.
2972  * Power ISA v3.1 has Programming Notes for these insns:
2973  *     Previous versions of the architecture allowed the contents of
2974  *     word 0 of the result register to be undefined. However, all
2975  *     processors that support this instruction write the result into
2976  *     words 0 and 1 (and words 2 and 3) of the result register, as
2977  *     is required by this version of the architecture.
2978  */
/*
 * VSX_CVT_FP_TO_INT2 - VSX float to word-integer conversion, writing each
 * result into both words of its target doubleword (see note above)
 *   op   - instruction mnemonic
 *   nels - number of source doublewords (1 or 2)
 *   stp  - source type (float64)
 *   ttp  - target type (int32 or uint32)
 *   sfi  - set FI
 *   rnan - resulting NaN
 */
#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan)                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i),          \
                                                       &env->fp_status);     \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i),     \
                                              rnan, 0, GETPC());             \
        }                                                                    \
        /* Duplicate the converted value into the adjacent word. */          \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                                   \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, sfi, GETPC());                                \
}
3003 
3004 VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U)
3005 VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U)
3006 VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U)
3007 VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U)
3008 
3009 /*
3010  * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
3011  *   op    - instruction mnemonic
3012  *   stp   - source type (float32 or float64)
3013  *   ttp   - target type (int32, uint32, int64 or uint64)
3014  *   sfld  - source vsr_t field
3015  *   tfld  - target vsr_t field
3016  *   rnan  - resulting NaN
3017  */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int flags;                                                               \
                                                                             \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
    flags = get_float_exception_flags(&env->fp_status);                      \
    if (flags & float_flag_invalid) {                                        \
        /* On invalid conversion substitute rnan (saturated as needed). */   \
        t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    /* Scalar conversion: FI and FPRF are always updated. */                 \
    do_float_check_status(env, true, GETPC());                               \
}
3034 
3035 VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
3036                   0x8000000000000000ULL)
3037 
3038 VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
3039                   0xffffffff80000000ULL)
3040 VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
3041 VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
3042 
3043 /*
3044  * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
3045  *   op    - instruction mnemonic
3046  *   nels  - number of elements (1, 2 or 4)
3047  *   stp   - source type (int32, uint32, int64 or uint64)
3048  *   ttp   - target type (float32 or float64)
3049  *   sfld  - source vsr_t field
3050  *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the intermediate result to single precision
3053  */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf, r2sp)\
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
{                                                                       \
    ppc_vsr_t t = { };                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            /* Round to single precision, kept in double format. */     \
            t.tfld = do_frsp(env, t.tfld, GETPC());                     \
        }                                                               \
        if (sfifprf) {                                                  \
            helper_compute_fprf_float64(env, t.tfld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, sfifprf, GETPC());                       \
}
3073 
3074 VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
3075 VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
3076 VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
3077 VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
3078 VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
3079 VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
3080 VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
3081 VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
3082 VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3083 VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3084 
/*
 * VSX_CVT_INT_TO_FP2 - 64-bit integer to single-precision conversion,
 * duplicating each result into both words of its target doubleword
 *   op  - instruction mnemonic
 *   stp - source type (int64 or uint64)
 *   ttp - target type (float32)
 */
#define VSX_CVT_INT_TO_FP2(op, stp, ttp)                                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
{                                                                       \
    ppc_vsr_t t = { };                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < 2; i++) {                                           \
        t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status);   \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                              \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, false, GETPC());                         \
}
3099 
3100 VSX_CVT_INT_TO_FP2(xvcvsxdsp, int64, float32)
3101 VSX_CVT_INT_TO_FP2(xvcvuxdsp, uint64, float32)
3102 
/*
 * VSX_CVT_INT128_TO_FP - 128-bit integer to quad-precision conversion
 *   op - instruction mnemonic
 *   tp - source type (int128 or uint128)
 */
#define VSX_CVT_INT128_TO_FP(op, tp)                            \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)\
{                                                               \
    helper_reset_fpstatus(env);                                 \
    xt->f128 = tp##_to_float128(xb->s128, &env->fp_status);     \
    helper_compute_fprf_float128(env, xt->f128);                \
    do_float_check_status(env, true, GETPC());                  \
}
3111 
3112 VSX_CVT_INT128_TO_FP(XSCVUQQP, uint128);
3113 VSX_CVT_INT128_TO_FP(XSCVSQQP, int128);
3114 
3115 /*
3116  * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3117  *   op    - instruction mnemonic
3118  *   stp   - source type (int32, uint32, int64 or uint64)
3119  *   ttp   - target type (float32 or float64)
3120  *   sfld  - source vsr_t field
3121  *   tfld  - target vsr_t field
3122  */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
{                                                                       \
    /* Start from the old target so untouched doublewords survive. */   \
    ppc_vsr_t t = *xt;                                                  \
                                                                        \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, t.tfld);                             \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, true, GETPC());                          \
}
3135 
3136 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3137 VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3138 
3139 /*
3140  * For "use current rounding mode", define a value that will not be
3141  * one of the existing rounding model enums.
3142  */
3143 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3144   float_round_up + float_round_to_zero)
3145 
3146 /*
3147  * VSX_ROUND - VSX floating point round
3148  *   op    - instruction mnemonic
3149  *   nels  - number of elements (1, 2 or 4)
3150  *   tp    - type (float32 or float64)
3151  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3152  *   rmode - rounding mode
3153  *   sfifprf - set FI and FPRF
3154  */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfifprf)                   \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
{                                                                      \
    ppc_vsr_t t = { };                                                 \
    int i;                                                             \
    FloatRoundMode curr_rounding_mode;                                 \
                                                                       \
    /*                                                                 \
     * rmode is a compile-time constant; only the "use current         \
     * rounding mode" variants skip saving/overriding the softfloat    \
     * rounding mode here (and restoring it below).                    \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        /* An sNaN input raises VXSNAN and yields the quieted NaN. */  \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.fld = tp##_snan_to_qnan(xb->fld);                        \
        } else {                                                       \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
        }                                                              \
        if (sfifprf) {                                                 \
            helper_compute_fprf_float64(env, t.fld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR                                                 \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(curr_rounding_mode, &env->fp_status);  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, sfifprf, GETPC());                      \
}
3193 
3194 VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3195 VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3196 VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3197 VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3198 VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3199 
3200 VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3201 VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3202 VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3203 VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3204 VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3205 
3206 VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3207 VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3208 VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3209 VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3210 VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3211 
3212 uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3213 {
3214     helper_reset_fpstatus(env);
3215 
3216     uint64_t xt = do_frsp(env, xb, GETPC());
3217 
3218     helper_compute_fprf_float64(env, xt);
3219     do_float_check_status(env, true, GETPC());
3220     return xt;
3221 }
3222 
3223 void helper_XVXSIGSP(ppc_vsr_t *xt, ppc_vsr_t *xb)
3224 {
3225     ppc_vsr_t t = { };
3226     uint32_t exp, i, fraction;
3227 
3228     for (i = 0; i < 4; i++) {
3229         exp = (xb->VsrW(i) >> 23) & 0xFF;
3230         fraction = xb->VsrW(i) & 0x7FFFFF;
3231         if (exp != 0 && exp != 255) {
3232             t.VsrW(i) = fraction | 0x00800000;
3233         } else {
3234             t.VsrW(i) = fraction;
3235         }
3236     }
3237     *xt = t;
3238 }
3239 
3240 /*
3241  * VSX_TEST_DC - VSX floating point test data class
3242  *   op    - instruction mnemonic
3243  *   nels  - number of elements (1, 2 or 4)
3244  *   xbn   - VSR register number
3245  *   tp    - type (float32 or float64)
3246  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3247  *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
3248  *   fld_max - target field max
3249  *   scrf - set result in CR and FPCC
3250  */
/*
 * DCMX data-class mask bits tested below (MSB first):
 *   6: any NaN   5: +inf    4: -inf
 *   3: +zero     2: -zero   1: +denormal   0: -denormal
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)         \
{                                                           \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
    ppc_vsr_t *xb = &env->vsr[xbn];                         \
    ppc_vsr_t t = { };                                      \
    uint32_t i, sign, dcmx;                                 \
    uint32_t cc, match = 0;                                 \
                                                            \
    if (!scrf) {                                            \
        dcmx = DCMX_XV(opcode);                             \
    } else {                                                \
        /* Scalar form: keep the target bits not written. */\
        t = *xt;                                            \
        dcmx = DCMX(opcode);                                \
    }                                                       \
                                                            \
    for (i = 0; i < nels; i++) {                            \
        sign = tp##_is_neg(xb->fld);                        \
        if (tp##_is_any_nan(xb->fld)) {                     \
            match = extract32(dcmx, 6, 1);                  \
        } else if (tp##_is_infinity(xb->fld)) {             \
            match = extract32(dcmx, 4 + !sign, 1);          \
        } else if (tp##_is_zero(xb->fld)) {                 \
            match = extract32(dcmx, 2 + !sign, 1);          \
        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
            match = extract32(dcmx, 0 + !sign, 1);          \
        }                                                   \
                                                            \
        if (scrf) {                                         \
            /* Record sign/match in CR[BF] and FPSCR.FPCC. */ \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
            env->fpscr &= ~FP_FPCC;                         \
            env->fpscr |= cc << FPSCR_FPCC;                 \
            env->crf[BF(opcode)] = cc;                      \
        } else {                                            \
            t.tfld = match ? fld_max : 0;                   \
        }                                                   \
        match = 0;                                          \
    }                                                       \
    if (!scrf) {                                            \
        *xt = t;                                            \
    }                                                       \
}
3293 
3294 VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
3295 VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
3296 VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
3297 VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3298 
/*
 * xststdcsp: VSX Scalar Test Data Class Single-Precision.
 * Tests the double-format operand against the DCMX data-class mask and
 * sets CR[BF]/FPCC: LT = sign, EQ = class match, SO = operand is not
 * exactly representable in single precision.
 */
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;
    float64 arg = xb->VsrD(0);
    float64 arg_sp;

    dcmx = DCMX(opcode);
    exp = (arg >> 52) & 0x7FF;
    sign = float64_is_neg(arg);

    if (float64_is_any_nan(arg)) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(arg)) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(arg)) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(arg) || (exp > 0 && exp < 0x381)) {
        /*
         * 0x381 (= 1023 - 126) is the double-precision biased exponent
         * of the smallest normal single-precision value, so a DP-normal
         * value below it counts as denormal for the SP class test.
         */
        match = extract32(dcmx, 0 + !sign, 1);
    }

    /* A value that doesn't survive a round-trip through SP is "not SP". */
    arg_sp = helper_todouble(helper_tosingle(arg));
    not_sp = arg != arg_sp;

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;
}
3328 
/*
 * xsrqpi[x]: VSX Scalar Round Quad-Precision to Integral.
 * The R bit and RMC field select the rounding mode; EX (Rc) distinguishes
 * the "with inexact" form (EX=1) from the one that suppresses XX (EX=0).
 */
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    /* Decode the rounding mode from R and RMC. */
    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        /* Use the rounding mode currently held in FPSCR[RN]. */
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round on a scratch status so flags can be filtered before merging. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* With EX=0 the inexact exception is suppressed. */
    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, true, GETPC());
    *xt = t;
}
3382 
/*
 * xsrqpxp: VSX Scalar Round Quad-Precision to Double-Extended Precision.
 * Implemented by converting to floatx80 and back under the rounding mode
 * selected by the R bit and RMC field.
 */
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    /* Decode the rounding mode from R and RMC. */
    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        /* Use the rounding mode currently held in FPSCR[RN]. */
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round-trip through extended double on a scratch status. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
        t.f128 = float128_snan_to_qnan(t.f128);
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
3434 
/*
 * xssqrtqp[o]: VSX Scalar Square Root Quad-Precision.
 * Rc(opcode) selects the round-to-odd form.
 */
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
                     ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    helper_reset_fpstatus(env);

    /* Operate on a scratch status so flags can be merged explicitly. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sqrt(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
3460 
/*
 * xssubqp[o]: VSX Scalar Subtract Quad-Precision (xt = xa - xb).
 * Rc(opcode) selects the round-to-odd form.
 */
void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    /* Start from the old target; only the f128 field is rewritten. */
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    /* Operate on a scratch status so flags can be merged explicitly. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
3486 
static inline void vsxger_excp(CPUPPCState *env, uintptr_t retaddr)
{
    /*
     * XV*GER instructions execute and set the FPSCR as if exceptions
     * are disabled and only at the end throw an exception
     */
    target_ulong enable;
    /* Stash and clear the enable/FI/FR bits while the flags are folded. */
    enable = env->fpscr & (FP_ENABLES | FP_FI | FP_FR);
    env->fpscr &= ~(FP_ENABLES | FP_FI | FP_FR);
    int status = get_float_exception_flags(&env->fp_status);
    if (unlikely(status & float_flag_invalid)) {
        if (status & float_flag_invalid_snan) {
            float_invalid_op_vxsnan(env, 0);
        }
        if (status & float_flag_invalid_imz) {
            float_invalid_op_vximz(env, false, 0);
        }
        if (status & float_flag_invalid_isi) {
            float_invalid_op_vxisi(env, false, 0);
        }
    }
    do_float_check_status(env, false, retaddr);
    /* Restore the saved enables and raise any now-enabled exception. */
    env->fpscr |= enable;
    do_fpscr_check_status(env, retaddr);
}
3512 
3513 typedef float64 extract_f16(float16, float_status *);
3514 
3515 static float64 extract_hf16(float16 in, float_status *fp_status)
3516 {
3517     return float16_to_float64(in, true, fp_status);
3518 }
3519 
3520 static float64 extract_bf16(bfloat16 in, float_status *fp_status)
3521 {
3522     return bfloat16_to_float64(in, fp_status);
3523 }
3524 
/*
 * Common body of the 16-bit XV*GER2* outer-product helpers.
 *   a, b    - source VSRs read as pairs of 16-bit lanes
 *   at      - target accumulator (4 VSRs)
 *   mask    - packed PMSK/XMSK/YMSK controls
 *   acc     - accumulate into the previous accumulator contents
 *   neg_mul - negate the product before accumulating
 *   neg_acc - negate the previous accumulator value
 *   extract - widens one 16-bit lane (IEEE fp16 or bfloat16) to float64
 */
static void vsxger16(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t  *at, uint32_t mask, bool acc,
                     bool neg_mul, bool neg_acc, extract_f16 extract)
{
    float32 r, aux_acc;
    float64 psum, va, vb, vc, vd;
    int i, j, xmsk_bit, ymsk_bit;
    uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
            xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
            ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
    float_status *excp_ptr = &env->fp_status;
    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
                /* PMSK bit 1 gates the even lane, bit 0 the odd lane. */
                va = !(pmsk & 2) ? float64_zero :
                                   extract(a->VsrHF(2 * i), excp_ptr);
                vb = !(pmsk & 2) ? float64_zero :
                                   extract(b->VsrHF(2 * j), excp_ptr);
                vc = !(pmsk & 1) ? float64_zero :
                                   extract(a->VsrHF(2 * i + 1), excp_ptr);
                vd = !(pmsk & 1) ? float64_zero :
                                   extract(b->VsrHF(2 * j + 1), excp_ptr);
                /* psum = va * vb + vc * vd; last step rounded as r32. */
                psum = float64_mul(va, vb, excp_ptr);
                psum = float64r32_muladd(vc, vd, psum, 0, excp_ptr);
                r = float64_to_float32(psum, excp_ptr);
                if (acc) {
                    aux_acc = at[i].VsrSF(j);
                    if (neg_mul) {
                        r = bfp32_neg(r);
                    }
                    if (neg_acc) {
                        aux_acc = bfp32_neg(aux_acc);
                    }
                    r = float32_add(r, aux_acc, excp_ptr);
                }
                at[i].VsrSF(j) = r;
            } else {
                /* Masked-out elements are cleared, not preserved. */
                at[i].VsrSF(j) = float32_zero;
            }
        }
    }
    vsxger_excp(env, GETPC());
}
3568 
3569 typedef void vsxger_zero(ppc_vsr_t *at, int, int);
3570 
3571 typedef void vsxger_muladd_f(ppc_vsr_t *, ppc_vsr_t *, ppc_vsr_t *, int, int,
3572                              int flags, float_status *s);
3573 
3574 static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3575                             int j, int flags, float_status *s)
3576 {
3577     at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j),
3578                                     at[i].VsrSF(j), flags, s);
3579 }
3580 
3581 static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3582                          int j, int flags, float_status *s)
3583 {
3584     at[i].VsrSF(j) = float32_mul(a->VsrSF(i), b->VsrSF(j), s);
3585 }
3586 
/* Zero accumulator element (i, j), 32-bit lanes. */
static void vsxger_zero32(ppc_vsr_t *at, int i, int j)
{
    at[i].VsrSF(j) = float32_zero;
}
3591 
3592 static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3593                             int j, int flags, float_status *s)
3594 {
3595     if (j >= 2) {
3596         j -= 2;
3597         at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j),
3598                                         at[i].VsrDF(j), flags, s);
3599     }
3600 }
3601 
3602 static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
3603                          int j, int flags, float_status *s)
3604 {
3605     if (j >= 2) {
3606         j -= 2;
3607         at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s);
3608     }
3609 }
3610 
3611 static void vsxger_zero64(ppc_vsr_t *at, int i, int j)
3612 {
3613     if (j >= 2) {
3614         j -= 2;
3615         at[i].VsrDF(j) = float64_zero;
3616     }
3617 }
3618 
/*
 * Common body of the 32/64-bit XVF*GER* helpers; mul/muladd/zero supply
 * the per-element operations for the lane width in use.
 */
static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                   ppc_acc_t  *at, uint32_t mask, bool acc, bool neg_mul,
                   bool neg_acc, vsxger_muladd_f mul, vsxger_muladd_f muladd,
                   vsxger_zero zero)
{
    int i, j, xmsk_bit, ymsk_bit, op_flags;
    uint8_t xmsk = mask & 0x0F;
    uint8_t ymsk = (mask >> 4) & 0x0F;
    float_status *excp_ptr = &env->fp_status;
    /*
     * Fold the negation controls into float_muladd flags: negate the
     * addend when exactly one of neg_acc/neg_mul is set, and negate the
     * final result when the product itself is negated.
     */
    op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0;
    op_flags |= (neg_mul) ? float_muladd_negate_result : 0;
    helper_reset_fpstatus(env);
    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
                if (acc) {
                    muladd(at, a, b, i, j, op_flags, excp_ptr);
                } else {
                    mul(at, a, b, i, j, op_flags, excp_ptr);
                }
            } else {
                /* Masked-out elements are cleared, not preserved. */
                zero(at, i, j);
            }
        }
    }
    vsxger_excp(env, GETPC());
}
3646 
QEMU_FLATTEN
void helper_XVBF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16 rank-2 GER, no accumulation: at = a * b */
    vsxger16(env, a, b, at, mask, false, false, false, extract_bf16);
}
3653 
QEMU_FLATTEN
void helper_XVBF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16, Positive multiply Positive accumulate: at = a * b + at */
    vsxger16(env, a, b, at, mask, true, false, false, extract_bf16);
}
3660 
QEMU_FLATTEN
void helper_XVBF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16, Positive multiply Negative accumulate: at = a * b - at */
    vsxger16(env, a, b, at, mask, true, false, true, extract_bf16);
}
3667 
QEMU_FLATTEN
void helper_XVBF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16, Negative multiply Positive accumulate: at = -(a * b) + at */
    vsxger16(env, a, b, at, mask, true, true, false, extract_bf16);
}
3674 
QEMU_FLATTEN
void helper_XVBF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16, Negative multiply Negative accumulate: at = -(a * b + at) */
    vsxger16(env, a, b, at, mask, true, true, true, extract_bf16);
}
3681 
QEMU_FLATTEN
void helper_XVF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    /* IEEE half-precision rank-2 GER, no accumulation: at = a * b */
    vsxger16(env, a, b, at, mask, false, false, false, extract_hf16);
}
3688 
QEMU_FLATTEN
void helper_XVF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* half-precision, Positive multiply Positive accumulate: at = a * b + at */
    vsxger16(env, a, b, at, mask, true, false, false, extract_hf16);
}
3695 
QEMU_FLATTEN
void helper_XVF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* half-precision, Positive multiply Negative accumulate: at = a * b - at */
    vsxger16(env, a, b, at, mask, true, false, true, extract_hf16);
}
3702 
QEMU_FLATTEN
void helper_XVF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* half-precision, Negative multiply Positive accumulate: -(a * b) + at */
    vsxger16(env, a, b, at, mask, true, true, false, extract_hf16);
}
3709 
QEMU_FLATTEN
void helper_XVF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* half-precision, Negative multiply Negative accumulate: -(a * b + at) */
    vsxger16(env, a, b, at, mask, true, true, true, extract_hf16);
}
3716 
QEMU_FLATTEN
void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    /* float32 rank-1 GER, no accumulation: at = a * b */
    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3724 
QEMU_FLATTEN
void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float32, Positive multiply Positive accumulate: at = a * b + at */
    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3732 
QEMU_FLATTEN
void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float32, Positive multiply Negative accumulate: at = a * b - at */
    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3740 
QEMU_FLATTEN
void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float32, Negative multiply Positive accumulate: at = -(a * b) + at */
    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3748 
QEMU_FLATTEN
void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float32, Negative multiply Negative accumulate: at = -(a * b + at) */
    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3756 
QEMU_FLATTEN
void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    /* float64 rank-1 GER, no accumulation: at = a * b */
    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3764 
QEMU_FLATTEN
void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float64, Positive multiply Positive accumulate: at = a * b + at */
    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3772 
QEMU_FLATTEN
void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float64, Positive multiply Negative accumulate: at = a * b - at */
    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3780 
QEMU_FLATTEN
void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float64, Negative multiply Positive accumulate: at = -(a * b) + at */
    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3788 
QEMU_FLATTEN
void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* float64, Negative multiply Negative accumulate: at = -(a * b + at) */
    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3796