xref: /openbmc/qemu/target/ppc/fpu_helper.c (revision 5d07159d)
1 /*
2  *  PowerPC floating point and SPE emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "exec/helper-proto.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 #include "fpu/softfloat.h"
25 
/*
 * Quiet an IEEE binary128 signalling NaN: set the most-significant
 * fraction bit (the "quiet" bit), which lives in bit 47 of the high
 * 64-bit half.  Everything else in the value is preserved.
 */
static inline float128 float128_snan_to_qnan(float128 x)
{
    float128 r;

    r.high = x.high | 0x0000800000000000;
    r.low = x.low;
    return r;
}

/* Same operation for the narrower formats: OR in the quiet bit. */
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
38 
39 static inline float32 bfp32_neg(float32 a)
40 {
41     if (unlikely(float32_is_any_nan(a))) {
42         return a;
43     } else {
44         return float32_chs(a);
45     }
46 }
47 
/*
 * Whether an enabled floating-point exception should actually be
 * delivered.  User-only emulation always delivers; in system mode
 * delivery additionally requires MSR[FE0] or MSR[FE1] to be set.
 */
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    return (env->msr & ((1U << MSR_FE0) | (1U << MSR_FE1))) != 0;
#endif
}
56 
57 /*****************************************************************************/
58 /* Floating point operations helpers */
59 
/*
 * This is the non-arithmetic conversion that happens e.g. on loads.
 * In the Power ISA pseudocode, this is called DOUBLE.
 */
uint64_t helper_todouble(uint32_t arg)
{
    uint32_t abs_arg = arg & 0x7fffffff;
    uint64_t ret;

    if (likely(abs_arg >= 0x00800000)) {
        if (unlikely(extract32(arg, 23, 8) == 0xff)) {
            /* Inf or NAN.  */
            ret  = (uint64_t)extract32(arg, 31, 1) << 63;
            ret |= (uint64_t)0x7ff << 52;
            ret |= (uint64_t)extract32(arg, 0, 23) << 29;
        } else {
            /*
             * Normalized operand.  Copy sign + exponent msb, then
             * replicate the complement of the exponent msb into the
             * next three exponent bits: this rebiases the exponent
             * from 127 to 1023.  The fraction shifts up by 29.
             */
            ret  = (uint64_t)extract32(arg, 30, 2) << 62;
            ret |= ((extract32(arg, 30, 1) ^ 1) * (uint64_t)7) << 59;
            ret |= (uint64_t)extract32(arg, 0, 30) << 29;
        }
    } else {
        /* Zero or Denormalized operand.  */
        ret = (uint64_t)extract32(arg, 31, 1) << 63;
        if (unlikely(abs_arg != 0)) {
            /*
             * Denormalized operand.
             * Shift fraction so that the msb is in the implicit bit position.
             * Thus, shift is in the range [1:23].
             */
            int shift = clz32(abs_arg) - 8;
            /*
             * The first 3 terms compute the float64 exponent.  We then bias
             * this result by -1 so that we can swallow the implicit bit below.
             */
            int exp = -126 - shift + 1023 - 1;

            /* The += carries the implicit fraction msb into the exponent. */
            ret |= (uint64_t)exp << 52;
            ret += (uint64_t)abs_arg << (52 - 23 + shift);
        }
    }
    return ret;
}
103 
/*
 * This is the non-arithmetic conversion that happens e.g. on stores.
 * In the Power ISA pseudocode, this is called SINGLE.
 */
uint32_t helper_tosingle(uint64_t arg)
{
    int exp = extract64(arg, 52, 11);
    uint32_t ret;

    /* 896 = 1023 - 127: biased float64 exponents above this map to a
     * representable (normal/Inf/NaN) float32 exponent. */
    if (likely(exp > 896)) {
        /* No denormalization required (includes Inf, NaN).  */
        ret  = extract64(arg, 62, 2) << 30;
        ret |= extract64(arg, 29, 30);
    } else {
        /*
         * Zero or Denormal result.  If the exponent is in bounds for
         * a single-precision denormal result, extract the proper
         * bits.  If the input is not zero, and the exponent is out of
         * bounds, then the result is undefined; this underflows to
         * zero.
         */
        ret = extract64(arg, 63, 1) << 31;
        if (unlikely(exp >= 874)) {
            /* Denormal result: 874 = 896 - 22, smallest exponent whose
             * fraction still lands in the 23-bit field after the shift. */
            ret |= ((1ULL << 52) | extract64(arg, 0, 52)) >> (896 + 30 - exp);
        }
    }
    return ret;
}
133 
134 static inline int ppc_float32_get_unbiased_exp(float32 f)
135 {
136     return ((f >> 23) & 0xFF) - 127;
137 }
138 
139 static inline int ppc_float64_get_unbiased_exp(float64 f)
140 {
141     return ((f >> 52) & 0x7FF) - 1023;
142 }
143 
/*
 * Classify a floating-point number.  The result has exactly one of the
 * six class bits set, optionally combined with is_neg for negative
 * values.
 */
enum {
    is_normal   = 1,
    is_zero     = 2,
    is_denormal = 4,
    is_inf      = 8,
    is_qnan     = 16,
    is_snan     = 32,
    is_neg      = 64,
};

/*
 * Instantiate <tp>_classify() for each softfloat width.  The SNaN test
 * uses a zeroed float_status so snan_bit_is_one = 0 (IEEE quiet-bit
 * convention).  Note the zero test precedes the zero-or-denormal test,
 * so the latter branch only ever flags denormals.
 */
#define COMPUTE_CLASS(tp)                                      \
static int tp##_classify(tp arg)                               \
{                                                              \
    int ret = tp##_is_neg(arg) * is_neg;                       \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        float_status dummy = { };  /* snan_bit_is_one = 0 */   \
        ret |= (tp##_is_signaling_nan(arg, &dummy)             \
                ? is_snan : is_qnan);                          \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        ret |= is_inf;                                         \
    } else if (tp##_is_zero(arg)) {                            \
        ret |= is_zero;                                        \
    } else if (tp##_is_zero_or_denormal(arg)) {                \
        ret |= is_denormal;                                    \
    } else {                                                   \
        ret |= is_normal;                                      \
    }                                                          \
    return ret;                                                \
}

COMPUTE_CLASS(float16)
COMPUTE_CLASS(float32)
COMPUTE_CLASS(float64)
COMPUTE_CLASS(float128)
179 
/*
 * Set FPSCR[FPRF] from a classification bitmask.  Rows are indexed by
 * ctz32(class), i.e. the position of the single class bit; is_neg is
 * bit 6 and so never affects ctz32 — it only selects the column.
 */
static void set_fprf_from_class(CPUPPCState *env, int class)
{
    static const uint8_t fprf[6][2] = {
        { 0x04, 0x08 },  /* normalized */
        { 0x02, 0x12 },  /* zero */
        { 0x14, 0x18 },  /* denormalized */
        { 0x05, 0x09 },  /* infinity */
        { 0x11, 0x11 },  /* qnan */
        { 0x00, 0x00 },  /* snan -- flags are undefined */
    };
    bool isneg = class & is_neg;

    env->fpscr &= ~FP_FPRF;
    env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
}
195 
/*
 * Public helpers that classify a value and store the class code into
 * FPSCR[FPRF], one per softfloat width.
 */
#define COMPUTE_FPRF(tp)                                \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
{                                                       \
    set_fprf_from_class(env, tp##_classify(arg));       \
}

COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
206 
/*
 * Floating-point invalid operations exception: set the VX/FX summary
 * bits and, when invalid-operation exceptions are enabled (VE) and
 * deliverable, raise a program interrupt carrying @op.
 */
static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
{
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_VE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, retaddr);
        }
    }
}

/*
 * Common tail for invalid arithmetic operations: clear FR/FI and, when
 * the exception is disabled (so execution continues), optionally set
 * FPCC to "unordered" (C | FU) before updating the summaries above.
 */
static void finish_invalid_op_arith(CPUPPCState *env, int op,
                                    bool set_fpcc, uintptr_t retaddr)
{
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, op, retaddr);
}
236 
/*
 * One helper per invalid-operation cause.  Each sets its FPSCR[VX*]
 * status bit and funnels into the common finish_invalid_op_* path,
 * which maintains VX/FX/FEX and may raise the program interrupt.
 */

/* Signalling NaN */
static void float_invalid_op_vxsnan(CPUPPCState *env, uintptr_t retaddr)
{
    env->fpscr |= FP_VXSNAN;
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, retaddr);
}

/* Magnitude subtraction of infinities */
static void float_invalid_op_vxisi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXISI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXISI, set_fpcc, retaddr);
}

/* Division of infinity by infinity */
static void float_invalid_op_vxidi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIDI;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIDI, set_fpcc, retaddr);
}

/* Division of zero by zero */
static void float_invalid_op_vxzdz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXZDZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXZDZ, set_fpcc, retaddr);
}

/* Multiplication of zero by infinity */
static void float_invalid_op_vximz(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXIMZ;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXIMZ, set_fpcc, retaddr);
}

/* Square root of a negative number */
static void float_invalid_op_vxsqrt(CPUPPCState *env, bool set_fpcc,
                                    uintptr_t retaddr)
{
    env->fpscr |= FP_VXSQRT;
    finish_invalid_op_arith(env, POWERPC_EXCP_FP_VXSQRT, set_fpcc, retaddr);
}
283 
/*
 * Ordered comparison of NaN.  Unlike the other causes this one never
 * raises immediately: when VE is set it only records the pending
 * program interrupt in exception_index/error_code, to be delivered
 * after the target register has been updated.
 */
static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
                                  uintptr_t retaddr)
{
    env->fpscr |= FP_VXVC;
    if (set_fpcc) {
        env->fpscr &= ~FP_FPCC;
        env->fpscr |= (FP_C | FP_FU);
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= FP_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    /* We must update the target FPR before raising the exception */
    if (env->fpscr & FP_VE) {
        CPUState *cs = env_cpu(env);

        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* Exception is deferred */
    }
}
308 
/*
 * Invalid conversion: set VXCVI, clear FR/FI, and optionally set FPCC
 * to "unordered" when the exception is disabled, then finish via the
 * common VX/FX path.
 */
static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
                                   uintptr_t retaddr)
{
    env->fpscr |= FP_VXCVI;
    env->fpscr &= ~(FP_FR | FP_FI);
    if (!(env->fpscr & FP_VE)) {
        if (set_fpcc) {
            env->fpscr &= ~FP_FPCC;
            env->fpscr |= (FP_C | FP_FU);
        }
    }
    finish_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, retaddr);
}
323 
/*
 * Zero-divide exception: set ZX/FX, clear FR/FI, and raise a program
 * interrupt immediately when ZE is set and exceptions are deliverable.
 */
static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
{
    env->fpscr |= FP_ZX;
    env->fpscr &= ~(FP_FR | FP_FI);
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_ZE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
                                   raddr);
        }
    }
}
340 
/*
 * Overflow exception: set OX/FX.  When OE is set, defer the program
 * interrupt (exception_index/error_code) so the target FPR can be
 * written first and return 0.  When OE is clear, return
 * float_flag_inexact so the caller also runs the inexact path.
 */
static inline int float_overflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;

    bool overflow_enabled = !!(env->fpscr & FP_OE);
    if (overflow_enabled) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    }

    return overflow_enabled ? 0 : float_flag_inexact;
}
360 
/*
 * Underflow exception: set UX/FX and, when UE is set, record a deferred
 * program interrupt (delivered after the target FPR update).
 */
static inline void float_underflow_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_UE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/*
 * Inexact exception: set XX/FX and, when XE is set, record a deferred
 * program interrupt, same protocol as underflow above.
 */
static inline void float_inexact_excp(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    env->fpscr |= FP_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (env->fpscr & FP_XE) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= FP_FEX;
        /* We must update the target FPR before raising the exception */
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
392 
393 void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
394 {
395     uint32_t mask = 1u << bit;
396     if (env->fpscr & mask) {
397         ppc_store_fpscr(env, env->fpscr & ~(target_ulong)mask);
398     }
399 }
400 
401 void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
402 {
403     uint32_t mask = 1u << bit;
404     if (!(env->fpscr & mask)) {
405         ppc_store_fpscr(env, env->fpscr | mask);
406     }
407 }
408 
409 void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
410 {
411     target_ulong mask = 0;
412     int i;
413 
414     /* TODO: push this extension back to translation time */
415     for (i = 0; i < sizeof(target_ulong) * 2; i++) {
416         if (nibbles & (1 << i)) {
417             mask |= (target_ulong) 0xf << (4 * i);
418         }
419     }
420     val = (val & mask) | (env->fpscr & ~mask);
421     ppc_store_fpscr(env, val);
422 }
423 
/*
 * Raise any floating-point exception that is both recorded and enabled
 * in FPSCR.  Causes are tested in priority order: OX, UX, XX, ZX, then
 * the individual invalid-operation causes when VE is set.  If nothing
 * qualifies, state is left untouched.
 */
static void do_fpscr_check_status(CPUPPCState *env, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    target_ulong fpscr = env->fpscr;
    int error = 0;

    if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
        error = POWERPC_EXCP_FP_OX;
    } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
        error = POWERPC_EXCP_FP_UX;
    } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
        error = POWERPC_EXCP_FP_XX;
    } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
        error = POWERPC_EXCP_FP_ZX;
    } else if (fpscr & FP_VE) {
        if (fpscr & FP_VXSOFT) {
            error = POWERPC_EXCP_FP_VXSOFT;
        } else if (fpscr & FP_VXSNAN) {
            error = POWERPC_EXCP_FP_VXSNAN;
        } else if (fpscr & FP_VXISI) {
            error = POWERPC_EXCP_FP_VXISI;
        } else if (fpscr & FP_VXIDI) {
            error = POWERPC_EXCP_FP_VXIDI;
        } else if (fpscr & FP_VXZDZ) {
            error = POWERPC_EXCP_FP_VXZDZ;
        } else if (fpscr & FP_VXIMZ) {
            error = POWERPC_EXCP_FP_VXIMZ;
        } else if (fpscr & FP_VXVC) {
            error = POWERPC_EXCP_FP_VXVC;
        } else if (fpscr & FP_VXSQRT) {
            error = POWERPC_EXCP_FP_VXSQRT;
        } else if (fpscr & FP_VXCVI) {
            error = POWERPC_EXCP_FP_VXCVI;
        } else {
            return;
        }
    } else {
        return;
    }
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = error | POWERPC_EXCP_FP;
    env->fpscr |= FP_FEX;
    /* Deferred floating-point exception after target FPSCR update */
    if (fp_exceptions_enabled(env)) {
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, raddr);
    }
}

void helper_fpscr_check_status(CPUPPCState *env)
{
    do_fpscr_check_status(env, GETPC());
}
477 
/*
 * Translate accumulated softfloat exception flags into FPSCR state and
 * deliver any exception that was deferred (either by the helpers above
 * or by an earlier instruction).  @change_fi controls whether FPSCR[FI]
 * is refreshed from the inexact flag.
 */
static void do_float_check_status(CPUPPCState *env, bool change_fi,
                                  uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);
    int status = get_float_exception_flags(&env->fp_status);

    if (status & float_flag_overflow) {
        /* May fold float_flag_inexact back in when OE is disabled. */
        status |= float_overflow_excp(env);
    } else if (status & float_flag_underflow) {
        float_underflow_excp(env);
    }
    if (status & float_flag_inexact) {
        float_inexact_excp(env);
    }
    if (change_fi) {
        env->fpscr = FIELD_DP64(env->fpscr, FPSCR, FI,
                                !!(status & float_flag_inexact));
    }

    if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (fp_exceptions_enabled(env)) {
            raise_exception_err_ra(env, cs->exception_index,
                                   env->error_code, raddr);
        }
    }
}

void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, true, GETPC());
}

/* Clear the accumulated softfloat exception flags. */
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
516 
/*
 * Decode the invalid-operation cause for add/sub: infinity magnitude
 * subtraction (ISI) takes priority over a signalling-NaN input.
 */
static void float_invalid_op_addsub(CPUPPCState *env, int flags,
                                    bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_isi) {
        float_invalid_op_vxisi(env, set_fpcc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
526 
527 /* fadd - fadd. */
528 float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
529 {
530     float64 ret = float64_add(arg1, arg2, &env->fp_status);
531     int flags = get_float_exception_flags(&env->fp_status);
532 
533     if (unlikely(flags & float_flag_invalid)) {
534         float_invalid_op_addsub(env, flags, 1, GETPC());
535     }
536 
537     return ret;
538 }
539 
540 /* fadds - fadds. */
541 float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2)
542 {
543     float64 ret = float64r32_add(arg1, arg2, &env->fp_status);
544     int flags = get_float_exception_flags(&env->fp_status);
545 
546     if (unlikely(flags & float_flag_invalid)) {
547         float_invalid_op_addsub(env, flags, 1, GETPC());
548     }
549     return ret;
550 }
551 
552 /* fsub - fsub. */
553 float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
554 {
555     float64 ret = float64_sub(arg1, arg2, &env->fp_status);
556     int flags = get_float_exception_flags(&env->fp_status);
557 
558     if (unlikely(flags & float_flag_invalid)) {
559         float_invalid_op_addsub(env, flags, 1, GETPC());
560     }
561 
562     return ret;
563 }
564 
565 /* fsubs - fsubs. */
566 float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2)
567 {
568     float64 ret = float64r32_sub(arg1, arg2, &env->fp_status);
569     int flags = get_float_exception_flags(&env->fp_status);
570 
571     if (unlikely(flags & float_flag_invalid)) {
572         float_invalid_op_addsub(env, flags, 1, GETPC());
573     }
574     return ret;
575 }
576 
/*
 * Decode the invalid-operation cause for multiply: zero-times-infinity
 * (IMZ) takes priority over a signalling-NaN input.
 */
static void float_invalid_op_mul(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
586 
587 /* fmul - fmul. */
588 float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
589 {
590     float64 ret = float64_mul(arg1, arg2, &env->fp_status);
591     int flags = get_float_exception_flags(&env->fp_status);
592 
593     if (unlikely(flags & float_flag_invalid)) {
594         float_invalid_op_mul(env, flags, 1, GETPC());
595     }
596 
597     return ret;
598 }
599 
600 /* fmuls - fmuls. */
601 float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2)
602 {
603     float64 ret = float64r32_mul(arg1, arg2, &env->fp_status);
604     int flags = get_float_exception_flags(&env->fp_status);
605 
606     if (unlikely(flags & float_flag_invalid)) {
607         float_invalid_op_mul(env, flags, 1, GETPC());
608     }
609     return ret;
610 }
611 
/*
 * Decode the invalid-operation cause for divide, in priority order:
 * infinity/infinity (IDI), then zero/zero (ZDZ), then signalling NaN.
 */
static void float_invalid_op_div(CPUPPCState *env, int flags,
                                 bool set_fprc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_idi) {
        float_invalid_op_vxidi(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_zdz) {
        float_invalid_op_vxzdz(env, set_fprc, retaddr);
    } else if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
623 
624 /* fdiv - fdiv. */
625 float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
626 {
627     float64 ret = float64_div(arg1, arg2, &env->fp_status);
628     int flags = get_float_exception_flags(&env->fp_status);
629 
630     if (unlikely(flags & float_flag_invalid)) {
631         float_invalid_op_div(env, flags, 1, GETPC());
632     }
633     if (unlikely(flags & float_flag_divbyzero)) {
634         float_zero_divide_excp(env, GETPC());
635     }
636 
637     return ret;
638 }
639 
640 /* fdivs - fdivs. */
641 float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2)
642 {
643     float64 ret = float64r32_div(arg1, arg2, &env->fp_status);
644     int flags = get_float_exception_flags(&env->fp_status);
645 
646     if (unlikely(flags & float_flag_invalid)) {
647         float_invalid_op_div(env, flags, 1, GETPC());
648     }
649     if (unlikely(flags & float_flag_divbyzero)) {
650         float_zero_divide_excp(env, GETPC());
651     }
652 
653     return ret;
654 }
655 
/*
 * Handle an invalid float-to-integer conversion.  Returns @ret (the
 * softfloat result) when the cause was an out-of-range conversion
 * (invalid_cvti), otherwise @ret_nan, the per-instruction default for
 * NaN inputs.
 */
static uint64_t float_invalid_cvt(CPUPPCState *env, int flags,
                                  uint64_t ret, uint64_t ret_nan,
                                  bool set_fprc, uintptr_t retaddr)
{
    /*
     * VXCVI is different from most in that it sets two exception bits,
     * VXCVI and VXSNAN for an SNaN input.
     */
    if (flags & float_flag_invalid_snan) {
        env->fpscr |= FP_VXSNAN;
    }
    float_invalid_op_vxcvi(env, set_fprc, retaddr);

    return flags & float_flag_invalid_cvti ? ret : ret_nan;
}
671 
/*
 * Float-to-integer conversion helpers (fctiw and friends).  @cvt names
 * the softfloat conversion, @nanval is the value substituted for NaN
 * inputs by float_invalid_cvt().
 */
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, float64 arg)                    \
{                                                                      \
    uint64_t ret = float64_to_##cvt(arg, &env->fp_status);             \
    int flags = get_float_exception_flags(&env->fp_status);            \
    if (unlikely(flags & float_flag_invalid)) {                        \
        ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC());  \
    }                                                                  \
    return ret;                                                        \
}

FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
691 
/*
 * Integer-to-float conversion helpers (fcfid and friends).  The
 * single-precision variants convert to float32 first, then widen the
 * rounded result back to float64 for the register file.
 */
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    do_float_check_status(env, true, GETPC());             \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
711 
/*
 * Round to integral value in floating format (fri*).  Temporarily
 * forces @rounding_mode, rounds, then restores the previous mode.
 * A signalling-NaN input raises VXSNAN; the inexact flag is masked
 * out because fri* does not report inexactness.
 */
static uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                       FloatRoundMode rounding_mode)
{
    FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
    int flags;

    set_float_rounding_mode(rounding_mode, &env->fp_status);
    arg = float64_round_to_int(arg, &env->fp_status);
    set_float_rounding_mode(old_rounding_mode, &env->fp_status);

    flags = get_float_exception_flags(&env->fp_status);
    if (flags & float_flag_invalid_snan) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* fri* does not set FPSCR[XX] */
    set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status);
    do_float_check_status(env, true, GETPC());

    return arg;
}
733 
/* frin - round to nearest, ties away from zero. */
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}

/* friz - round toward zero (truncate). */
uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}

/* frip - round toward +infinity (ceiling). */
uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}

/* frim - round toward -infinity (floor). */
uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
753 
/*
 * Decode the invalid-operation cause for fused multiply-add:
 * zero-times-infinity (IMZ) first, otherwise fall through to the
 * add/sub decoding (ISI, then SNaN).
 */
static void float_invalid_op_madd(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (flags & float_flag_invalid_imz) {
        float_invalid_op_vximz(env, set_fpcc, retaddr);
    } else {
        float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
    }
}
763 
/*
 * Fused multiply-add, double precision: a * b + c with @madd_flags
 * selecting the negation variants (see MADD_FLGS etc. below).
 */
static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
                         float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}

/* Single-precision (float64r32) fused multiply-add counterpart. */
static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b,
                          float64 c, int madd_flags, uintptr_t retaddr)
{
    float64 ret = float64r32_muladd(a, b, c, madd_flags, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_madd(env, flags, 1, retaddr);
    }
    return ret;
}
787 
/*
 * Emit the double- and single-precision helper pair for one fused
 * multiply-add variant.  The *_FLGS macros map each instruction to the
 * softfloat muladd negation flags: negate_c gives a - c (msub),
 * negate_result gives the nmadd/nmsub forms.
 */
#define FPU_FMADD(op, madd_flags)                                    \
    uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,            \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \
    uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1,         \
                         uint64_t arg2, uint64_t arg3)               \
    { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); }

#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
805 
/* frsp - frsp. */
/*
 * Round a double to single precision and widen the rounded value back
 * to float64 representation (via the non-arithmetic helper_todouble).
 * A signalling-NaN input raises VXSNAN.
 */
static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr)
{
    float32 f32 = float64_to_float32(arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
    return helper_todouble(f32);
}

uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    return do_frsp(env, arg, GETPC());
}
822 
/*
 * Decode the invalid-operation cause for square root: negative operand
 * (VXSQRT) takes priority over a signalling-NaN input.
 */
static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
                                  bool set_fpcc, uintptr_t retaddr)
{
    if (unlikely(flags & float_flag_invalid_sqrt)) {
        float_invalid_op_vxsqrt(env, set_fpcc, retaddr);
    } else if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, retaddr);
    }
}
832 
833 /* fsqrt - fsqrt. */
834 float64 helper_fsqrt(CPUPPCState *env, float64 arg)
835 {
836     float64 ret = float64_sqrt(arg, &env->fp_status);
837     int flags = get_float_exception_flags(&env->fp_status);
838 
839     if (unlikely(flags & float_flag_invalid)) {
840         float_invalid_op_sqrt(env, flags, 1, GETPC());
841     }
842 
843     return ret;
844 }
845 
846 /* fsqrts - fsqrts. */
847 float64 helper_fsqrts(CPUPPCState *env, float64 arg)
848 {
849     float64 ret = float64r32_sqrt(arg, &env->fp_status);
850     int flags = get_float_exception_flags(&env->fp_status);
851 
852     if (unlikely(flags & float_flag_invalid)) {
853         float_invalid_op_sqrt(env, flags, 1, GETPC());
854     }
855     return ret;
856 }
857 
/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}

/* fres - fres. */
/* Single-precision (float64r32) variant of the reciprocal estimate. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
    /* "Estimate" the reciprocal with actual division.  */
    float64 ret = float64r32_div(float64_one, arg, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        float_zero_divide_excp(env, GETPC());
        /* For FPSCR.ZE == 0, the result is 1/2.  */
        ret = float64_set_sign(float64_half, float64_is_neg(arg));
    }

    return ret;
}
895 
/* frsqrte  - frsqrte. */
float64 helper_frsqrte(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal square root with an actual sqrt
       followed by an actual division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}
914 
/* frsqrtes  - frsqrtes. */
float64 helper_frsqrtes(CPUPPCState *env, float64 arg)
{
    /* "Estimate" the reciprocal square root with an actual sqrt
       followed by an actual (single-precision-constrained) division.  */
    float64 rets = float64_sqrt(arg, &env->fp_status);
    float64 retd = float64r32_div(float64_one, rets, &env->fp_status);
    int flags = get_float_exception_flags(&env->fp_status);

    if (unlikely(flags & float_flag_invalid)) {
        float_invalid_op_sqrt(env, flags, 1, GETPC());
    }
    if (unlikely(flags & float_flag_divbyzero)) {
        /* Reciprocal of (square root of) zero.  */
        float_zero_divide_excp(env, GETPC());
    }

    return retd;
}
933 
934 /* fsel - fsel. */
935 uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c)
936 {
937     CPU_DoubleU fa;
938 
939     fa.ll = a;
940 
941     if ((!float64_is_neg(fa.d) || float64_is_zero(fa.d)) &&
942         !float64_is_any_nan(fa.d)) {
943         return c;
944     } else {
945         return b;
946     }
947 }
948 
/*
 * ftdiv: test whether fra/frb is suitable for a software divide estimate.
 * Returns a 4-bit CR field image: 0x8 always set, 0x4 = fg_flag
 * (infinite operand, zero divisor, or denormal divisor), 0x2 = fe_flag
 * (NaN operands or exponent ranges where a divide estimate would be
 * inaccurate or invalid).
 */
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            /* Divisor exponent near the limits of double range. */
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            /* Quotient exponent would overflow/underflow, or the
               dividend is too small for an accurate estimate. */
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
984 
985 uint32_t helper_ftsqrt(uint64_t frb)
986 {
987     int fe_flag = 0;
988     int fg_flag = 0;
989 
990     if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
991         fe_flag = 1;
992         fg_flag = 1;
993     } else {
994         int e_b = ppc_float64_get_unbiased_exp(frb);
995 
996         if (unlikely(float64_is_any_nan(frb))) {
997             fe_flag = 1;
998         } else if (unlikely(float64_is_zero(frb))) {
999             fe_flag = 1;
1000         } else if (unlikely(float64_is_neg(frb))) {
1001             fe_flag = 1;
1002         } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) {
1003             fe_flag = 1;
1004         }
1005 
1006         if (unlikely(float64_is_zero_or_denormal(frb))) {
1007             /* XB is not zero because of the above check and */
1008             /* therefore must be denormalized.               */
1009             fg_flag = 1;
1010         }
1011     }
1012 
1013     return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1014 }
1015 
/*
 * fcmpu: unordered floating-point compare.  Sets FPSCR.FPCC and CR
 * field crfD to 0x08 (lt), 0x04 (gt), 0x02 (eq) or 0x01 (unordered),
 * and raises VXSNAN if either operand is a signaling NaN.
 */
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_vxsnan(env, GETPC());
    }
}
1046 
/*
 * fcmpo: ordered floating-point compare.  Same result encoding as
 * fcmpu, but any NaN operand additionally raises VXVC (and VXSNAN
 * when the NaN is signaling).
 */
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~FP_FPCC;
    env->fpscr |= ret << FPSCR_FPCC;
    env->crf[crfD] = (uint32_t) ret;
    if (unlikely(ret == 0x01UL)) {
        float_invalid_op_vxvc(env, 1, GETPC());
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_vxsnan(env, GETPC());
        }
    }
}
1079 
/* Single-precision floating-point conversions */

/* efscfsi: signed 32-bit integer -> single-precision float. */
static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efscfui: unsigned 32-bit integer -> single-precision float. */
static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->vec_status);

    return u.l;
}

/* efsctsi: single-precision float -> signed 32-bit integer. */
static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctui: single-precision float -> unsigned 32-bit integer. */
static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32(u.f, &env->vec_status);
}

/* efsctsiz: like efsctsi but rounding toward zero. */
static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_int32_round_to_zero(u.f, &env->vec_status);
}

/* efsctuiz: like efsctui but rounding toward zero. */
static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }

    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
}

/* efscfsf: signed 32-bit fractional -> float (divide by 2^32). */
static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efscfuf: unsigned 32-bit fractional -> float (divide by 2^32). */
static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

    return u.l;
}

/* efsctsf: float -> signed 32-bit fractional (multiply by 2^32). */
static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_int32(u.f, &env->vec_status);
}

/* efsctuf: float -> unsigned 32-bit fractional (multiply by 2^32). */
static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
        return 0;
    }
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);

    return float32_to_uint32(u.f, &env->vec_status);
}
1206 
/*
 * Expand a public 32-bit SPE conversion helper (helper_eNAME) that
 * simply forwards to the matching static routine eNAME above.
 */
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
1232 
/*
 * Expand a vector SPE conversion helper (helper_evNAME) that applies
 * the scalar routine eNAME to each 32-bit half of the 64-bit operand.
 */
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
1259 
/* Single-precision floating-point arithmetic */

/* efsadd: single-precision add on raw 32-bit operands. */
static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efssub: single-precision subtract. */
static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsmul: single-precision multiply. */
static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
    return u1.l;
}

/* efsdiv: single-precision divide. */
static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);
    return u1.l;
}
1300 
/* Expand a scalar SPE arithmetic helper forwarding to eNAME above. */
#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);

/* Expand a vector SPE arithmetic helper: eNAME on each 32-bit half. */
#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
1329 
/* Single-precision floating-point comparisons */

/* efscmplt: returns 4 if op1 < op2, else 0. */
static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

/* efscmpgt: returns 4 if op1 > op2, else 0, implemented as !(op1 <= op2).
   NOTE(review): a NaN operand makes float32_le false and thus yields 4
   here — confirm this matches the intended SPE unordered behavior. */
static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
}

/* efscmpeq: returns 4 if op1 == op2, else 0. */
static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;

    u1.l = op1;
    u2.l = op2;
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
}

static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}

static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}

static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}
1375 
/* Expand a scalar SPE compare helper forwarding to eNAME above. */
#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
1393 
/*
 * Merge the per-lane compare results of the two vector halves into a
 * single value: t0 in bit 3, t1 in bit 2, "either" in bit 1 and
 * "both" in bit 0.
 */
static inline uint32_t evcmp_merge(int t0, int t1)
{
    uint32_t both = t0 & t1;
    uint32_t either = t0 | t1;

    return (t0 << 3) | (t1 << 2) | (either << 1) | both;
}
1398 
/* Expand a vector SPE compare helper: run eNAME on each 32-bit half
   and merge the two results via evcmp_merge. */
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
1417 
/* Double-precision floating-point conversion */

/* efdcfsi: signed 32-bit integer -> double. */
uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfsid: signed 64-bit integer -> double. */
uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfui: unsigned 32-bit integer -> double. */
uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->vec_status);

    return u.ll;
}

/* efdcfuid: unsigned 64-bit integer -> double. */
uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->vec_status);

    return u.ll;
}
1454 
/* efdctsi: double -> signed 32-bit integer. */
uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctui: double -> unsigned 32-bit integer. */
uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32(u.d, &env->vec_status);
}

/* efdctsiz: double -> signed 32-bit integer, rounding toward zero. */
uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int32_round_to_zero(u.d, &env->vec_status);
}

/* efdctsidz: double -> signed 64-bit integer, rounding toward zero. */
uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_int64_round_to_zero(u.d, &env->vec_status);
}

/* efdctuiz: double -> unsigned 32-bit integer, rounding toward zero. */
uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
}

/* efdctuidz: double -> unsigned 64-bit integer, rounding toward zero. */
uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }

    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
}
1532 
/* efdcfsf: signed 32-bit fractional -> double (divide by 2^32). */
uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

/* efdcfuf: unsigned 32-bit fractional -> double (divide by 2^32).
   int64_to_float64 is safe for the scale constant: 1ULL << 32 is a
   representable positive int64 value. */
uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

    return u.ll;
}

/* efdctsf: double -> signed 32-bit fractional (multiply by 2^32). */
uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_int32(u.d, &env->vec_status);
}

/* efdctuf: double -> unsigned 32-bit fractional (multiply by 2^32). */
uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_any_nan(u.d))) {
        return 0;
    }
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);

    return float64_to_uint32(u.d, &env->vec_status);
}
1588 
/* efscfd: double -> single precision. */
uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->vec_status);

    return u2.l;
}

/* efdcfs: single -> double precision. */
uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->vec_status);

    return u2.ll;
}
1610 
/* Double precision floating-point arithmetic */

/* efdadd: double-precision add on raw 64-bit operands. */
uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdsub: double-precision subtract. */
uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efdmul: double-precision multiply. */
uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}

/* efddiv: double-precision divide. */
uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
    return u1.ll;
}
1651 
/* Double precision floating point helpers */

/* efdtstlt: returns 4 if op1 < op2, else 0. */
uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

/* efdtstgt: returns 4 if op1 > op2, else 0 (as !(op1 <= op2)). */
uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
}

/* efdtsteq: returns 4 if op1 == op2, else 0.
   NOTE(review): uses the quiet equality, unlike efscmpeq above which
   uses float32_eq — confirm the asymmetry is intentional. */
uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;

    u1.ll = op1;
    u2.ll = op2;
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
}

uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}

uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}

uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}
1697 
/* Identity no-op "conversion"; presumably lets type-generic conversion
   macros instantiate a float64 -> float64 case — confirm with users. */
#define float64_to_float64(x, env) x
1699 
1700 
1701 /*
1702  * VSX_ADD_SUB - VSX floating point add/subtract
1703  *   name  - instruction mnemonic
1704  *   op    - operation (add or sub)
1705  *   nels  - number of elements (1, 2 or 4)
1706  *   tp    - type (float32 or float64)
1707  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1708  *   sfifprf - set FI and FPRF
1709  */
1710 #define VSX_ADD_SUB(name, op, nels, tp, fld, sfifprf, r2sp)                  \
1711 void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                          \
1712                    ppc_vsr_t *xa, ppc_vsr_t *xb)                             \
1713 {                                                                            \
1714     ppc_vsr_t t = { };                                                       \
1715     int i;                                                                   \
1716                                                                              \
1717     helper_reset_fpstatus(env);                                              \
1718                                                                              \
1719     for (i = 0; i < nels; i++) {                                             \
1720         float_status tstat = env->fp_status;                                 \
1721         set_float_exception_flags(0, &tstat);                                \
1722         t.fld = tp##_##op(xa->fld, xb->fld, &tstat);                         \
1723         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1724                                                                              \
1725         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1726             float_invalid_op_addsub(env, tstat.float_exception_flags,        \
1727                                     sfifprf, GETPC());                       \
1728         }                                                                    \
1729                                                                              \
1730         if (r2sp) {                                                          \
1731             t.fld = do_frsp(env, t.fld, GETPC());                            \
1732         }                                                                    \
1733                                                                              \
1734         if (sfifprf) {                                                       \
1735             helper_compute_fprf_float64(env, t.fld);                         \
1736         }                                                                    \
1737     }                                                                        \
1738     *xt = t;                                                                 \
1739     do_float_check_status(env, sfifprf, GETPC());                            \
1740 }
1741 
1742 VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1743 VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1744 VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1745 VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1746 VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1747 VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1748 VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1749 VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
1750 
/* xsaddqp[o]: quad-precision add; Rc selects round-to-odd. */
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
{
    ppc_vsr_t t = *xt;
    float_status tstat;

    helper_reset_fpstatus(env);

    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        /* The "o" form of the instruction rounds to odd. */
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    t.f128 = float128_add(xa->f128, xb->f128, &tstat);
    /* Merge flags raised under the scratch status back into fp_status. */
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
    }

    helper_compute_fprf_float128(env, t.f128);

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
1777 
1778 /*
1779  * VSX_MUL - VSX floating point multiply
1780  *   op    - instruction mnemonic
1781  *   nels  - number of elements (1, 2 or 4)
1782  *   tp    - type (float32 or float64)
1783  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1784  *   sfifprf - set FI and FPRF
1785  */
1786 #define VSX_MUL(op, nels, tp, fld, sfifprf, r2sp)                            \
1787 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                            \
1788                  ppc_vsr_t *xa, ppc_vsr_t *xb)                               \
1789 {                                                                            \
1790     ppc_vsr_t t = { };                                                       \
1791     int i;                                                                   \
1792                                                                              \
1793     helper_reset_fpstatus(env);                                              \
1794                                                                              \
1795     for (i = 0; i < nels; i++) {                                             \
1796         float_status tstat = env->fp_status;                                 \
1797         set_float_exception_flags(0, &tstat);                                \
1798         t.fld = tp##_mul(xa->fld, xb->fld, &tstat);                          \
1799         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1800                                                                              \
1801         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1802             float_invalid_op_mul(env, tstat.float_exception_flags,           \
1803                                  sfifprf, GETPC());                          \
1804         }                                                                    \
1805                                                                              \
1806         if (r2sp) {                                                          \
1807             t.fld = do_frsp(env, t.fld, GETPC());                            \
1808         }                                                                    \
1809                                                                              \
1810         if (sfifprf) {                                                       \
1811             helper_compute_fprf_float64(env, t.fld);                         \
1812         }                                                                    \
1813     }                                                                        \
1814                                                                              \
1815     *xt = t;                                                                 \
1816     do_float_check_status(env, sfifprf, GETPC());                            \
1817 }
1818 
1819 VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1820 VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1821 VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1822 VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1823 
1824 void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
1825                     ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1826 {
1827     ppc_vsr_t t = *xt;
1828     float_status tstat;
1829 
1830     helper_reset_fpstatus(env);
1831     tstat = env->fp_status;
1832     if (unlikely(Rc(opcode) != 0)) {
1833         tstat.float_rounding_mode = float_round_to_odd;
1834     }
1835 
1836     set_float_exception_flags(0, &tstat);
1837     t.f128 = float128_mul(xa->f128, xb->f128, &tstat);
1838     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1839 
1840     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1841         float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
1842     }
1843     helper_compute_fprf_float128(env, t.f128);
1844 
1845     *xt = t;
1846     do_float_check_status(env, true, GETPC());
1847 }
1848 
/*
 * VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 *           (via do_frsp) after the operation
 */
#define VSX_DIV(op, nels, tp, fld, sfifprf, r2sp)                             \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                                \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_div(xa->fld, xb->fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_div(env, tstat.float_exception_flags,            \
                                 sfifprf, GETPC());                           \
        }                                                                     \
        if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {   \
            float_zero_divide_excp(env, GETPC());                             \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

/* VSX_DIV arguments: (op, nels, tp, fld, sfifprf, r2sp) */
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1897 
1898 void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
1899                     ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
1900 {
1901     ppc_vsr_t t = *xt;
1902     float_status tstat;
1903 
1904     helper_reset_fpstatus(env);
1905     tstat = env->fp_status;
1906     if (unlikely(Rc(opcode) != 0)) {
1907         tstat.float_rounding_mode = float_round_to_odd;
1908     }
1909 
1910     set_float_exception_flags(0, &tstat);
1911     t.f128 = float128_div(xa->f128, xb->f128, &tstat);
1912     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1913 
1914     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1915         float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
1916     }
1917     if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
1918         float_zero_divide_excp(env, GETPC());
1919     }
1920 
1921     helper_compute_fprf_float128(env, t.f128);
1922     *xt = t;
1923     do_float_check_status(env, true, GETPC());
1924 }
1925 
/*
 * VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * Note: the "estimate" is implemented here as an exact 1/x division,
 * which is a legal (maximally precise) estimate.
 */
#define VSX_RE(op, nels, tp, fld, sfifprf, r2sp)                              \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        if (unlikely(tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
        t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            t.fld = do_frsp(env, t.fld, GETPC());                             \
        }                                                                     \
                                                                              \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

/* VSX_RE arguments: (op, nels, tp, fld, sfifprf, r2sp) */
VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
1965 
/*
 * VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 */
#define VSX_SQRT(op, nels, tp, fld, sfifprf, r2sp)                           \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

/* VSX_SQRT arguments: (op, nels, tp, fld, sfifprf, r2sp) */
VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2010 
/*
 * VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * Note: the "estimate" is implemented as an exact 1/sqrt(x), which is
 * a legal (maximally precise) estimate.
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfifprf, r2sp)                         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        t.fld = tp##_sqrt(xb->fld, &tstat);                                  \
        t.fld = tp##_div(tp##_one, t.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            float_invalid_op_sqrt(env, tstat.float_exception_flags,          \
                                  sfifprf, GETPC());                         \
        }                                                                    \
        if (r2sp) {                                                          \
            t.fld = do_frsp(env, t.fld, GETPC());                            \
        }                                                                    \
                                                                             \
        if (sfifprf) {                                                       \
            helper_compute_fprf_float64(env, t.fld);                         \
        }                                                                    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, sfifprf, GETPC());                            \
}

/* VSX_RSQRTE arguments: (op, nels, tp, fld, sfifprf, r2sp) */
VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2054 
/*
 * VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 *
 * Writes CR field BF(opcode) as 0x8 | (FG << 2) | (FE << 1): FE is set
 * when the divide could raise an enabled exception, FG when an operand
 * is infinity, the divisor is zero, or the divisor is denormal.
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xa, ppc_vsr_t *xb)                          \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa->fld) ||                       \
                     tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa->fld);             \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa->fld) ||                    \
                         tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax - 2))) {          \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa->fld) &&                        \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin + 1)) ||                  \
                        (e_a <= (emin + nbits)))) {                     \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and so     \
                 * must be denormalized.                                \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

/* VSX_TDIV arguments: (op, nels, tp, fld, emin, emax, nbits) */
VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2111 
/*
 * VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 *
 * (Unlike VSX_TDIV, this macro takes no emax parameter.)
 * Writes CR field BF(opcode) as 0x8 | (FG << 2) | (FE << 1): FE is set
 * when the square root could raise an enabled exception, FG when the
 * operand is infinity, zero, or denormal.
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)      \
{                                                                       \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb->fld) ||                       \
                     tp##_is_zero(xb->fld))) {                          \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb->fld);             \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb->fld))) {                   \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_zero(xb->fld))) {               \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb->fld))) {                \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb->fld) &&                        \
                       (e_b <= (emin + nbits))) {                       \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb->fld))) {          \
                /*                                                      \
                 * XB is not zero because of the above check and        \
                 * therefore must be denormalized.                      \
                 */                                                     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

/* VSX_TSQRT arguments: (op, nels, tp, fld, emin, nbits) */
VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2164 
/*
 * VSX_MADD - VSX floating point multiply/add variations
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   maddflgs - flags for the float*muladd routine that control the
 *           various forms (madd, msub, nmadd, nmsub)
 *   sfifprf - set FI and FPRF
 *
 * Computes (s1 * s3) + s2 per element, with sign handling selected by
 * maddflgs; t starts as a copy of *xt so untouched elements survive.
 */
#define VSX_MADD(op, nels, tp, fld, maddflgs, sfifprf)                        \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                             \
                 ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3)                 \
{                                                                             \
    ppc_vsr_t t = *xt;                                                        \
    int i;                                                                    \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat);     \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            float_invalid_op_madd(env, tstat.float_exception_flags,           \
                                  sfifprf, GETPC());                          \
        }                                                                     \
                                                                              \
        if (sfifprf) {                                                        \
            helper_compute_fprf_float64(env, t.fld);                          \
        }                                                                     \
    }                                                                         \
    *xt = t;                                                                  \
    do_float_check_status(env, sfifprf, GETPC());                             \
}

/* VSX_MADD arguments: (op, nels, tp, fld, maddflgs, sfifprf) */
VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)

VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)

VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0)
VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0)
VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0)
VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0)
2221 
2222 /*
2223  * VSX_MADDQ - VSX floating point quad-precision muliply/add
2224  *   op    - instruction mnemonic
2225  *   maddflgs - flags for the float*muladd routine that control the
2226  *           various forms (madd, msub, nmadd, nmsub)
2227  *   ro    - round to odd
2228  */
2229 #define VSX_MADDQ(op, maddflgs, ro)                                            \
2230 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\
2231                  ppc_vsr_t *s3)                                                \
2232 {                                                                              \
2233     ppc_vsr_t t = *xt;                                                         \
2234                                                                                \
2235     helper_reset_fpstatus(env);                                                \
2236                                                                                \
2237     float_status tstat = env->fp_status;                                       \
2238     set_float_exception_flags(0, &tstat);                                      \
2239     if (ro) {                                                                  \
2240         tstat.float_rounding_mode = float_round_to_odd;                        \
2241     }                                                                          \
2242     t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat);  \
2243     env->fp_status.float_exception_flags |= tstat.float_exception_flags;       \
2244                                                                                \
2245     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {          \
2246         float_invalid_op_madd(env, tstat.float_exception_flags,                \
2247                               false, GETPC());                                 \
2248     }                                                                          \
2249                                                                                \
2250     helper_compute_fprf_float128(env, t.f128);                                 \
2251     *xt = t;                                                                   \
2252     do_float_check_status(env, true, GETPC());                                 \
2253 }
2254 
2255 VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
2256 VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
2257 VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
2258 VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
2259 VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
2260 VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
2261 VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
2262 VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 0)
2263 
/*
 * VSX_SCALAR_CMP - VSX scalar floating point compare
 *   op    - instruction mnemonic
 *   tp    - type
 *   cmp   - comparison operation
 *   fld   - vsr_t field
 *   svxvc - set VXVC bit
 *
 * Note: the comparison is invoked as cmp(xb, xa) -- operands swapped --
 * so GE/GT are implemented with le/lt.  The result field is written as
 * an all-ones (true) or all-zeros (false) mask; the rest of xt is
 * cleared.
 */
#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc)                               \
        void helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                ppc_vsr_t *xa, ppc_vsr_t *xb)                                 \
{                                                                             \
    int flags;                                                                \
    bool r, vxvc;                                                             \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    if (svxvc) {                                                              \
        r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status);                    \
    } else {                                                                  \
        r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status);            \
    }                                                                         \
                                                                              \
    flags = get_float_exception_flags(&env->fp_status);                       \
    if (unlikely(flags & float_flag_invalid)) {                               \
        vxvc = svxvc;                                                         \
        if (flags & float_flag_invalid_snan) {                                \
            float_invalid_op_vxsnan(env, GETPC());                            \
            vxvc &= !(env->fpscr & FP_VE);                                    \
        }                                                                     \
        if (vxvc) {                                                           \
            float_invalid_op_vxvc(env, 0, GETPC());                           \
        }                                                                     \
    }                                                                         \
                                                                              \
    memset(xt, 0, sizeof(*xt));                                               \
    memset(&xt->fld, -r, sizeof(xt->fld));                                    \
    do_float_check_status(env, false, GETPC());                               \
}

/* VSX_SCALAR_CMP arguments: (op, tp, cmp, fld, svxvc) */
VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1)
VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1)
2310 
2311 void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
2312                        ppc_vsr_t *xa, ppc_vsr_t *xb)
2313 {
2314     int64_t exp_a, exp_b;
2315     uint32_t cc;
2316 
2317     exp_a = extract64(xa->VsrD(0), 52, 11);
2318     exp_b = extract64(xb->VsrD(0), 52, 11);
2319 
2320     if (unlikely(float64_is_any_nan(xa->VsrD(0)) ||
2321                  float64_is_any_nan(xb->VsrD(0)))) {
2322         cc = CRF_SO;
2323     } else {
2324         if (exp_a < exp_b) {
2325             cc = CRF_LT;
2326         } else if (exp_a > exp_b) {
2327             cc = CRF_GT;
2328         } else {
2329             cc = CRF_EQ;
2330         }
2331     }
2332 
2333     env->fpscr &= ~FP_FPCC;
2334     env->fpscr |= cc << FPSCR_FPCC;
2335     env->crf[BF(opcode)] = cc;
2336 
2337     do_float_check_status(env, false, GETPC());
2338 }
2339 
2340 void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
2341                        ppc_vsr_t *xa, ppc_vsr_t *xb)
2342 {
2343     int64_t exp_a, exp_b;
2344     uint32_t cc;
2345 
2346     exp_a = extract64(xa->VsrD(0), 48, 15);
2347     exp_b = extract64(xb->VsrD(0), 48, 15);
2348 
2349     if (unlikely(float128_is_any_nan(xa->f128) ||
2350                  float128_is_any_nan(xb->f128))) {
2351         cc = CRF_SO;
2352     } else {
2353         if (exp_a < exp_b) {
2354             cc = CRF_LT;
2355         } else if (exp_a > exp_b) {
2356             cc = CRF_GT;
2357         } else {
2358             cc = CRF_EQ;
2359         }
2360     }
2361 
2362     env->fpscr &= ~FP_FPCC;
2363     env->fpscr |= cc << FPSCR_FPCC;
2364     env->crf[BF(opcode)] = cc;
2365 
2366     do_float_check_status(env, false, GETPC());
2367 }
2368 
/*
 * Common body for xscmpodp/xscmpudp: compare the double-precision
 * values in xa->VsrD(0) and xb->VsrD(0), record the 4-bit condition
 * (LT/GT/EQ/unordered) in both FPSCR.FPCC and CR field crf_idx, and
 * raise the invalid-operation exceptions required for the comparison:
 *   - VXSNAN when either operand is a signaling NaN;
 *   - VXVC for the ordered compare when the result is unordered; for
 *     an SNaN operand, VXVC is set only when VE is clear.
 * The condition codes are committed before any exception is raised.
 */
static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
                                 int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        /* Distinguish signaling from quiet NaNs for exception flags. */
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
            float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
            vxsnan_flag = true;
            if (!(env->fpscr & FP_VE) && ordered) {
                vxvc_flag = true;
            }
        } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
                   float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    /* Update FPCC and the CR field before raising any exception. */
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, false, GETPC());
}
2421 
/* xscmpodp: VSX scalar compare ordered double-precision. */
void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), true);
}
2427 
/* xscmpudp: VSX scalar compare unordered double-precision. */
void helper_xscmpudp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmp(env, xa, xb, BF(opcode), false);
}
2433 
/*
 * Quad-precision counterpart of do_scalar_cmp (xscmpoqp/xscmpuqp):
 * identical condition-code and VXSNAN/VXVC semantics, operating on
 * the full 128-bit f128 operands.
 */
static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
                                  ppc_vsr_t *xb, int crf_idx, bool ordered)
{
    uint32_t cc;
    bool vxsnan_flag = false, vxvc_flag = false;

    helper_reset_fpstatus(env);

    switch (float128_compare(xa->f128, xb->f128, &env->fp_status)) {
    case float_relation_less:
        cc = CRF_LT;
        break;
    case float_relation_equal:
        cc = CRF_EQ;
        break;
    case float_relation_greater:
        cc = CRF_GT;
        break;
    case float_relation_unordered:
        cc = CRF_SO;

        /* Distinguish signaling from quiet NaNs for exception flags. */
        if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
            float128_is_signaling_nan(xb->f128, &env->fp_status)) {
            vxsnan_flag = true;
            if (!(env->fpscr & FP_VE) && ordered) {
                vxvc_flag = true;
            }
        } else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
                   float128_is_quiet_nan(xb->f128, &env->fp_status)) {
            if (ordered) {
                vxvc_flag = true;
            }
        }

        break;
    default:
        g_assert_not_reached();
    }

    /* Update FPCC and the CR field before raising any exception. */
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[crf_idx] = cc;

    if (vxsnan_flag) {
        float_invalid_op_vxsnan(env, GETPC());
    }
    if (vxvc_flag) {
        float_invalid_op_vxvc(env, 0, GETPC());
    }

    do_float_check_status(env, false, GETPC());
}
2486 
/* xscmpoqp: VSX scalar compare ordered quad-precision. */
void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), true);
}
2492 
/* xscmpuqp: VSX scalar compare unordered quad-precision. */
void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
                     ppc_vsr_t *xb)
{
    do_scalar_cmpq(env, xa, xb, BF(opcode), false);
}
2498 
2499 /*
2500  * VSX_MAX_MIN - VSX floating point maximum/minimum
2501  *   name  - instruction mnemonic
2502  *   op    - operation (max or min)
2503  *   nels  - number of elements (1, 2 or 4)
2504  *   tp    - type (float32 or float64)
2505  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2506  */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt,                           \
                   ppc_vsr_t *xa, ppc_vsr_t *xb)                              \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    int i;                                                                    \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        t.fld = tp##_##op(xa->fld, xb->fld, &env->fp_status);                 \
        if (unlikely(tp##_is_signaling_nan(xa->fld, &env->fp_status) ||       \
                     tp##_is_signaling_nan(xb->fld, &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
    do_float_check_status(env, false, GETPC());                               \
}

/*
 * softfloat maxnum/minnum implement IEEE 754-2008 maxNum/minNum: a
 * single quiet NaN operand loses to a numeric operand.  A signaling
 * NaN in either input additionally raises VXSNAN above.
 */
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2532 
/*
 * VSX_MAX_MINC - VSX scalar maximum/minimum with "C" (C-language-style)
 *                NaN handling
 *   name - instruction mnemonic
 *   max  - true for maximum, false for minimum
 *   tp   - type (float64 or float128)
 *   fld  - vsr_t field (VsrD(0) or f128)
 * The quiet compare returns false when either operand is a NaN, so a
 * NaN input selects the xb operand; VXSNAN is raised if the compare
 * consumed a signaling NaN.
 */
#define VSX_MAX_MINC(name, max, tp, fld)                                      \
void helper_##name(CPUPPCState *env,                                          \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    bool first;                                                               \
                                                                              \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    if (max) {                                                                \
        first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status);             \
    } else {                                                                  \
        first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status);             \
    }                                                                         \
                                                                              \
    if (first) {                                                              \
        t.fld = xa->fld;                                                      \
    } else {                                                                  \
        t.fld = xb->fld;                                                      \
        if (env->fp_status.float_exception_flags & float_flag_invalid_snan) { \
            float_invalid_op_vxsnan(env, GETPC());                            \
        }                                                                     \
    }                                                                         \
                                                                              \
    *xt = t;                                                                  \
}

VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
VSX_MAX_MINC(XSMAXCQP, true, float128, f128);
VSX_MAX_MINC(XSMINCQP, false, float128, f128);
2564 
/*
 * VSX_MAX_MINJ - VSX scalar maximum/minimum, Type-J (xsmaxjdp/xsminjdp)
 *   name - instruction mnemonic
 *   max  - 1 for maximum, 0 for minimum
 * A NaN in xa takes precedence over a NaN in xb; signed zeros are
 * distinguished (+0 wins for max, -0 wins for min); the target is
 * left unmodified when VE is set and a signaling NaN was seen.
 */
#define VSX_MAX_MINJ(name, max)                                               \
void helper_##name(CPUPPCState *env,                                          \
                   ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)               \
{                                                                             \
    ppc_vsr_t t = { };                                                        \
    bool vxsnan_flag = false, vex_flag = false;                               \
                                                                              \
    if (unlikely(float64_is_any_nan(xa->VsrD(0)))) {                          \
        if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) {                   \
        if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {         \
            vxsnan_flag = true;                                               \
        }                                                                     \
        t.VsrD(0) = xb->VsrD(0);                                              \
    } else if (float64_is_zero(xa->VsrD(0)) &&                                \
               float64_is_zero(xb->VsrD(0))) {                                \
        if (max) {                                                            \
            if (!float64_is_neg(xa->VsrD(0)) ||                               \
                !float64_is_neg(xb->VsrD(0))) {                               \
                t.VsrD(0) = 0ULL;                                             \
            } else {                                                          \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            }                                                                 \
        } else {                                                              \
            if (float64_is_neg(xa->VsrD(0)) ||                                \
                float64_is_neg(xb->VsrD(0))) {                                \
                t.VsrD(0) = 0x8000000000000000ULL;                            \
            } else {                                                          \
                t.VsrD(0) = 0ULL;                                             \
            }                                                                 \
        }                                                                     \
    } else if ((max &&                                                        \
               !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) ||     \
               (!max &&                                                       \
               float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) {      \
        t.VsrD(0) = xa->VsrD(0);                                              \
    } else {                                                                  \
        t.VsrD(0) = xb->VsrD(0);                                              \
    }                                                                         \
                                                                              \
    vex_flag = (env->fpscr & FP_VE) && vxsnan_flag;                           \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_vxsnan(env, GETPC());                                \
    }                                                                         \
    if (!vex_flag) {                                                          \
        *xt = t;                                                              \
    }                                                                         \
}                                                                             \

VSX_MAX_MINJ(XSMAXJDP, 1);
VSX_MAX_MINJ(XSMINJDP, 0);
2619 
2620 /*
2621  * VSX_CMP - VSX floating point compare
2622  *   op    - instruction mnemonic
2623  *   nels  - number of elements (1, 2 or 4)
2624  *   tp    - type (float32 or float64)
2625  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2626  *   cmp   - comparison operation
2627  *   svxvc - set VXVC bit
2628  *   exp   - expected result of comparison
2629  */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt,                     \
                     ppc_vsr_t *xa, ppc_vsr_t *xb)                        \
{                                                                         \
    ppc_vsr_t t = *xt;                                                    \
    uint32_t crf6 = 0;                                                    \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa->fld) ||                          \
                     tp##_is_any_nan(xb->fld))) {                         \
            if (tp##_is_signaling_nan(xa->fld, &env->fp_status) ||        \
                tp##_is_signaling_nan(xb->fld, &env->fp_status)) {        \
                float_invalid_op_vxsnan(env, GETPC());                    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_vxvc(env, 0, GETPC());                   \
            }                                                             \
            t.fld = 0;                                                    \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb->fld, xa->fld, &env->fp_status) == exp) {   \
                t.fld = -1;                                               \
            } else {                                                      \
                t.fld = 0;                                                \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    *xt = t;                                                              \
    crf6 = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);                  \
    return crf6;                                                          \
}

/*
 * Returned crf6: bit 3 set when all lanes compared true, bit 1 set when
 * all lanes compared false.  Note the operand swap — cmp(xb, xa) with
 * le/lt and exp=1 implements "xa >= xb" / "xa > xb"; the "ne" forms use
 * eq with exp=0.
 */
VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
2676 
2677 /*
2678  * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2679  *   op    - instruction mnemonic
2680  *   nels  - number of elements (1, 2 or 4)
2681  *   stp   - source type (float32 or float64)
2682  *   ttp   - target type (float32 or float64)
2683  *   sfld  - source vsr_t field
2684  *   tfld  - target vsr_t field (f32 or f64)
2685  *   sfifprf - set FI and FPRF
2686  */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf)  \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfifprf) {                                             \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, sfifprf, GETPC());                  \
}

/* The scalar form sets FI and FPRF; the vector form sets neither. */
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2711 
/*
 * VSX_CVT_FP_TO_FP2 - VSX double- to single-precision conversion where
 *                     each result is duplicated into both words of the
 *                     target doubleword
 *   op      - instruction mnemonic
 *   nels    - number of (doubleword) elements
 *   stp     - source type (float64)
 *   ttp     - target type (float32)
 *   sfifprf - set FI and FPRF
 */
#define VSX_CVT_FP_TO_FP2(op, nels, stp, ttp, sfifprf)                \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)      \
{                                                                     \
    ppc_vsr_t t = { };                                                \
    int i;                                                            \
                                                                      \
    for (i = 0; i < nels; i++) {                                      \
        t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
        if (unlikely(stp##_is_signaling_nan(xb->VsrD(i),              \
                                            &env->fp_status))) {      \
            float_invalid_op_vxsnan(env, GETPC());                    \
            t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i));        \
        }                                                             \
        if (sfifprf) {                                                \
            helper_compute_fprf_##ttp(env, t.VsrW(2 * i));            \
        }                                                             \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                            \
    }                                                                 \
                                                                      \
    *xt = t;                                                          \
    do_float_check_status(env, sfifprf, GETPC());                     \
}

VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0)
VSX_CVT_FP_TO_FP2(xscvdpsp, 1, float64, float32, 1)
2737 
2738 /*
2739  * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
2740  *   op    - instruction mnemonic
2741  *   nels  - number of elements (1, 2 or 4)
2742  *   stp   - source type (float32 or float64)
2743  *   ttp   - target type (float32 or float64)
2744  *   sfld  - source vsr_t field
2745  *   tfld  - target vsr_t field (f32 or f64)
2746  *   sfprf - set FPRF
2747  */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
{                                                                       \
    ppc_vsr_t t = *xt;                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,                   \
                                            &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                      \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_##ttp(env, t.tfld);                     \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, true, GETPC());                          \
}

/*
 * Only instantiated with sfprf=1; do_float_check_status above
 * hard-codes FI accordingly.
 */
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2772 
2773 /*
2774  * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2775  *                       involving one half precision value
2776  *   op    - instruction mnemonic
2777  *   nels  - number of elements (1, 2 or 4)
2778  *   stp   - source type
2779  *   ttp   - target type
2780  *   sfld  - source vsr_t field
2781  *   tfld  - target vsr_t field
2782  *   sfifprf - set FI and FPRF
2783  */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfifprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)   \
{                                                                  \
    ppc_vsr_t t = { };                                             \
    int i;                                                         \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb->sfld,              \
                                            &env->fp_status))) {   \
            float_invalid_op_vxsnan(env, GETPC());                 \
            t.tfld = ttp##_snan_to_qnan(t.tfld);                   \
        }                                                          \
        if (sfifprf) {                                             \
            helper_compute_fprf_##ttp(env, t.tfld);                \
        }                                                          \
    }                                                              \
                                                                   \
    *xt = t;                                                       \
    do_float_check_status(env, sfifprf, GETPC());                  \
}

/*
 * The extra '1' argument above is the softfloat half-precision
 * "ieee" flag selecting IEEE format (vs. ARM alternative format) —
 * it is why these conversions need a separate macro.
 */
VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i  + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2810 
2811 void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
2812 {
2813     ppc_vsr_t t = { };
2814     int i, status;
2815 
2816     helper_reset_fpstatus(env);
2817 
2818     for (i = 0; i < 4; i++) {
2819         t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status);
2820     }
2821 
2822     status = get_float_exception_flags(&env->fp_status);
2823     if (unlikely(status & float_flag_invalid_snan)) {
2824         float_invalid_op_vxsnan(env, GETPC());
2825     }
2826 
2827     *xt = t;
2828     do_float_check_status(env, false, GETPC());
2829 }
2830 
/*
 * xscvqpdp[o]: convert quad-precision to double-precision.  ro != 0
 * selects round-to-odd (the "o" form); the rounding-mode override is
 * applied to a scratch float_status so it does not leak into env,
 * and the resulting exception flags are merged back afterwards.
 */
void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
                     ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    float_status tstat;

    tstat = env->fp_status;
    if (ro != 0) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    t.VsrD(0) = float128_to_float64(xb->f128, &tstat);
    /* Merge the scratch-status flags back into the live status. */
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;
    if (unlikely(float128_is_signaling_nan(xb->f128, &tstat))) {
        float_invalid_op_vxsnan(env, GETPC());
        t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0));
    }
    helper_compute_fprf_float64(env, t.VsrD(0));

    *xt = t;
    do_float_check_status(env, true, GETPC());
}
2853 
2854 uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2855 {
2856     uint64_t result, sign, exp, frac;
2857 
2858     float_status tstat = env->fp_status;
2859     set_float_exception_flags(0, &tstat);
2860 
2861     sign = extract64(xb, 63,  1);
2862     exp  = extract64(xb, 52, 11);
2863     frac = extract64(xb,  0, 52) | 0x10000000000000ULL;
2864 
2865     if (unlikely(exp == 0 && extract64(frac, 0, 52) != 0)) {
2866         /* DP denormal operand.  */
2867         /* Exponent override to DP min exp.  */
2868         exp = 1;
2869         /* Implicit bit override to 0.  */
2870         frac = deposit64(frac, 53, 1, 0);
2871     }
2872 
2873     if (unlikely(exp < 897 && frac != 0)) {
2874         /* SP tiny operand.  */
2875         if (897 - exp > 63) {
2876             frac = 0;
2877         } else {
2878             /* Denormalize until exp = SP min exp.  */
2879             frac >>= (897 - exp);
2880         }
2881         /* Exponent override to SP min exp - 1.  */
2882         exp = 896;
2883     }
2884 
2885     result = sign << 31;
2886     result |= extract64(exp, 10, 1) << 30;
2887     result |= extract64(exp, 0, 7) << 23;
2888     result |= extract64(frac, 29, 23);
2889 
2890     /* hardware replicates result to both words of the doubleword result.  */
2891     return (result << 32) | result;
2892 }
2893 
/*
 * xscvspdpn: non-signalling single- to double-precision format
 * conversion; the SP operand lives in the high word of the doubleword.
 */
uint64_t helper_XSCVSPDPN(uint64_t xb)
{
    return helper_todouble(xb >> 32);
}
2898 
2899 /*
2900  * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2901  *   op    - instruction mnemonic
2902  *   nels  - number of elements (1, 2 or 4)
2903  *   stp   - source type (float32 or float64)
2904  *   ttp   - target type (int32, uint32, int64 or uint64)
2905  *   sfld  - source vsr_t field
2906  *   tfld  - target vsr_t field
2907  *   sfi   - set FI
2908  *   rnan  - resulting NaN
2909  */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan)         \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);  \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
        }                                                                    \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, sfi, GETPC());                                \
}

/*
 * Flags are cleared per element so float_invalid_cvt sees only that
 * element's faults, then accumulated into all_flags for the final
 * status check.  Scalar (xscv*) forms set FI; vector forms do not.
 */
VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
                  0ULL)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), false, \
                  0x80000000ULL)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
                  false, 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U)
2946 
/*
 * VSX_CVT_FP_TO_INT128 - VSX quad-precision to 128-bit integer conversion
 *   op   - instruction mnemonic
 *   tp   - target integer type (int128 or uint128)
 *   rnan - replacement for the high doubleword on invalid conversion
 * On an invalid conversion the high doubleword is corrected via
 * float_invalid_cvt and the low doubleword is derived from its low
 * bit, so the 0 / INT128_MIN / INT128_MAX patterns extend correctly.
 */
#define VSX_CVT_FP_TO_INT128(op, tp, rnan)                                     \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)               \
{                                                                              \
    ppc_vsr_t t;                                                               \
    int flags;                                                                 \
                                                                               \
    helper_reset_fpstatus(env);                                                \
    t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status);      \
    flags = get_float_exception_flags(&env->fp_status);                        \
    if (unlikely(flags & float_flag_invalid)) {                                \
        t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
        t.VsrD(1) = -(t.VsrD(0) & 1);                                          \
    }                                                                          \
                                                                               \
    *xt = t;                                                                   \
    do_float_check_status(env, true, GETPC());                                 \
}

VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0)
VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL);
2967 
2968 /*
2969  * Likewise, except that the result is duplicated into both subwords.
2970  * Power ISA v3.1 has Programming Notes for these insns:
2971  *     Previous versions of the architecture allowed the contents of
2972  *     word 0 of the result register to be undefined. However, all
2973  *     processors that support this instruction write the result into
2974  *     words 0 and 1 (and words 2 and 3) of the result register, as
2975  *     is required by this version of the architecture.
2976  */
/*
 * VSX_CVT_FP_TO_INT2 - float64 to 32-bit integer conversion with the
 * result written to both words of each target doubleword (per the
 * Programming Note quoted above).
 *   op   - instruction mnemonic
 *   nels - number of source doublewords (1 or 2)
 *   stp  - source type (float64)
 *   ttp  - target type (int32 or uint32)
 *   sfi  - set FI and report status as a scalar operation
 *   rnan - integer value substituted on an invalid conversion
 *
 * Exception flags are accumulated per-element in all_flags so that an
 * invalid conversion in one element does not hide flags from another.
 */
#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan)                    \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)             \
{                                                                            \
    int all_flags = env->fp_status.float_exception_flags, flags;             \
    ppc_vsr_t t = { };                                                       \
    int i;                                                                   \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        env->fp_status.float_exception_flags = 0;                            \
        t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i),          \
                                                       &env->fp_status);     \
        flags = env->fp_status.float_exception_flags;                        \
        if (unlikely(flags & float_flag_invalid)) {                          \
            t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i),     \
                                              rnan, 0, GETPC());             \
        }                                                                    \
        t.VsrW(2 * i + 1) = t.VsrW(2 * i);                                   \
        all_flags |= flags;                                                  \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    env->fp_status.float_exception_flags = all_flags;                        \
    do_float_check_status(env, sfi, GETPC());                                \
}

VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U)
VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U)
VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U)
VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U)
3006 
3007 /*
3008  * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
3009  *   op    - instruction mnemonic
3010  *   stp   - source type (float32 or float64)
3011  *   ttp   - target type (int32, uint32, int64 or uint64)
3012  *   sfld  - source vsr_t field
3013  *   tfld  - target vsr_t field
3014  *   rnan  - resulting NaN
3015  */
/*
 * NOTE(review): unlike VSX_CVT_FP_TO_INT128 above, this macro does not call
 * helper_reset_fpstatus() itself — presumably the translator resets status
 * before these helpers run; confirm against translate/vsx-impl.c.inc.
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode,                          \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                               \
{                                                                            \
    ppc_vsr_t t = { };                                                       \
    int flags;                                                               \
                                                                             \
    t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status);      \
    flags = get_float_exception_flags(&env->fp_status);                      \
    if (flags & float_flag_invalid) {                                        \
        t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());    \
    }                                                                        \
                                                                             \
    *xt = t;                                                                 \
    do_float_check_status(env, true, GETPC());                               \
}

/*
 * xscvqpswz's rnan is the sign-extended 32-bit minimum because the result
 * field is the full doubleword VsrD(0).
 */
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
3040 
3041 /*
3042  * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
3043  *   op    - instruction mnemonic
3044  *   nels  - number of elements (1, 2 or 4)
3045  *   stp   - source type (int32, uint32, int64 or uint64)
3046  *   ttp   - target type (float32 or float64)
3047  *   sfld  - source vsr_t field
3048  *   tfld  - target vsr_t field
 *   sfifprf - set FI and FPRF
 *   r2sp  - round the intermediate result to single precision
3051  */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf, r2sp)\
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
{                                                                       \
    ppc_vsr_t t = { };                                                  \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            t.tfld = do_frsp(env, t.tfld, GETPC());                     \
        }                                                               \
        if (sfifprf) {                                                  \
            helper_compute_fprf_float64(env, t.tfld);                   \
        }                                                               \
    }                                                                   \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, sfifprf, GETPC());                       \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
/*
 * NOTE(review): source type here is uint64 while the signed variant above
 * uses int32.  Assuming VsrW() yields a uint32_t lvalue, the implicit
 * zero-extension makes uint64_to_float64 equivalent to uint32_to_float64,
 * so this is correct — but uint32 would be clearer; confirm before changing.
 */
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3082 
3083 #define VSX_CVT_INT_TO_FP2(op, stp, ttp)                                \
3084 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)        \
3085 {                                                                       \
3086     ppc_vsr_t t = { };                                                  \
3087     int i;                                                              \
3088                                                                         \
3089     for (i = 0; i < 2; i++) {                                           \
3090         t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status);   \
3091         t.VsrW(2 * i + 1) = t.VsrW(2 * i);                              \
3092     }                                                                   \
3093                                                                         \
3094     *xt = t;                                                            \
3095     do_float_check_status(env, false, GETPC());                         \
3096 }
3097 
3098 VSX_CVT_INT_TO_FP2(xvcvsxdsp, int64, float32)
3099 VSX_CVT_INT_TO_FP2(xvcvuxdsp, uint64, float32)
3100 
3101 #define VSX_CVT_INT128_TO_FP(op, tp)                            \
3102 void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)\
3103 {                                                               \
3104     helper_reset_fpstatus(env);                                 \
3105     xt->f128 = tp##_to_float128(xb->s128, &env->fp_status);     \
3106     helper_compute_fprf_float128(env, xt->f128);                \
3107     do_float_check_status(env, true, GETPC());                  \
3108 }
3109 
3110 VSX_CVT_INT128_TO_FP(XSCVUQQP, uint128);
3111 VSX_CVT_INT128_TO_FP(XSCVSQQP, int128);
3112 
3113 /*
3114  * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3115  *   op    - instruction mnemonic
3116  *   stp   - source type (int32, uint32, int64 or uint64)
3117  *   ttp   - target type (float32 or float64)
3118  *   sfld  - source vsr_t field
3119  *   tfld  - target vsr_t field
3120  */
#define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
void helper_##op(CPUPPCState *env, uint32_t opcode,                     \
                 ppc_vsr_t *xt, ppc_vsr_t *xb)                          \
{                                                                       \
    /* Start from the current target so fields other than tfld keep     \
       their prior contents. */                                         \
    ppc_vsr_t t = *xt;                                                  \
                                                                        \
    t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status);                 \
    helper_compute_fprf_##ttp(env, t.tfld);                             \
                                                                        \
    *xt = t;                                                            \
    do_float_check_status(env, true, GETPC());                          \
}

VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3136 
3137 /*
3138  * For "use current rounding mode", define a value that will not be
3139  * one of the existing rounding model enums.
3140  */
/*
 * Sum of four distinct small enumerators; assumed not to collide with any
 * single FloatRoundMode value — verify against fpu/softfloat-types.h if
 * that enum ever changes.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
  float_round_up + float_round_to_zero)
3143 
3144 /*
3145  * VSX_ROUND - VSX floating point round
3146  *   op    - instruction mnemonic
3147  *   nels  - number of elements (1, 2 or 4)
3148  *   tp    - type (float32 or float64)
3149  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3150  *   rmode - rounding mode
3151  *   sfifprf - set FI and FPRF
3152  */
/*
 * Since rmode is a compile-time constant at each instantiation, the
 * FLOAT_ROUND_CURRENT tests fold away; curr_rounding_mode is only read
 * under the same condition that writes it, so it is never used
 * uninitialized.
 *
 * NOTE(review): the sfifprf path calls helper_compute_fprf_float64 even
 * though tp may be float32; all sfifprf=1 instantiations below are float64,
 * so this holds — keep that invariant if adding instantiations.
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfifprf)                   \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)       \
{                                                                      \
    ppc_vsr_t t = { };                                                 \
    int i;                                                             \
    FloatRoundMode curr_rounding_mode;                                 \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb->fld,                    \
                                           &env->fp_status))) {        \
            float_invalid_op_vxsnan(env, GETPC());                     \
            t.fld = tp##_snan_to_qnan(xb->fld);                        \
        } else {                                                       \
            t.fld = tp##_round_to_int(xb->fld, &env->fp_status);       \
        }                                                              \
        if (sfifprf) {                                                 \
            helper_compute_fprf_float64(env, t.fld);                   \
        }                                                              \
    }                                                                  \
                                                                       \
    /*                                                                 \
     * If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR                                                 \
     */                                                                \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(curr_rounding_mode, &env->fp_status);  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    *xt = t;                                                           \
    do_float_check_status(env, sfifprf, GETPC());                      \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3209 
3210 uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3211 {
3212     helper_reset_fpstatus(env);
3213 
3214     uint64_t xt = do_frsp(env, xb, GETPC());
3215 
3216     helper_compute_fprf_float64(env, xt);
3217     do_float_check_status(env, true, GETPC());
3218     return xt;
3219 }
3220 
3221 void helper_XVXSIGSP(ppc_vsr_t *xt, ppc_vsr_t *xb)
3222 {
3223     ppc_vsr_t t = { };
3224     uint32_t exp, i, fraction;
3225 
3226     for (i = 0; i < 4; i++) {
3227         exp = (xb->VsrW(i) >> 23) & 0xFF;
3228         fraction = xb->VsrW(i) & 0x7FFFFF;
3229         if (exp != 0 && exp != 255) {
3230             t.VsrW(i) = fraction | 0x00800000;
3231         } else {
3232             t.VsrW(i) = fraction;
3233         }
3234     }
3235     *xt = t;
3236 }
3237 
3238 /*
3239  * VSX_TEST_DC - VSX floating point test data class
3240  *   op    - instruction mnemonic
3241  *   nels  - number of elements (1, 2 or 4)
3242  *   xbn   - VSR register number
3243  *   tp    - type (float32 or float64)
3244  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3245  *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
3246  *   fld_max - target field max
3247  *   scrf - set result in CR and FPCC
3248  */
/*
 * The zero test precedes the zero_or_denormal test, so the last branch
 * effectively matches denormals only.  'match' is cleared after each
 * element.  With scrf, the CR field and FPSCR.FPCC are set from the
 * sign/match of the (single) element instead of writing a mask result.
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)         \
{                                                           \
    ppc_vsr_t *xt = &env->vsr[xT(opcode)];                  \
    ppc_vsr_t *xb = &env->vsr[xbn];                         \
    ppc_vsr_t t = { };                                      \
    uint32_t i, sign, dcmx;                                 \
    uint32_t cc, match = 0;                                 \
                                                            \
    if (!scrf) {                                            \
        dcmx = DCMX_XV(opcode);                             \
    } else {                                                \
        t = *xt;                                            \
        dcmx = DCMX(opcode);                                \
    }                                                       \
                                                            \
    for (i = 0; i < nels; i++) {                            \
        sign = tp##_is_neg(xb->fld);                        \
        if (tp##_is_any_nan(xb->fld)) {                     \
            match = extract32(dcmx, 6, 1);                  \
        } else if (tp##_is_infinity(xb->fld)) {             \
            match = extract32(dcmx, 4 + !sign, 1);          \
        } else if (tp##_is_zero(xb->fld)) {                 \
            match = extract32(dcmx, 2 + !sign, 1);          \
        } else if (tp##_is_zero_or_denormal(xb->fld)) {     \
            match = extract32(dcmx, 0 + !sign, 1);          \
        }                                                   \
                                                            \
        if (scrf) {                                         \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
            env->fpscr &= ~FP_FPCC;                         \
            env->fpscr |= cc << FPSCR_FPCC;                 \
            env->crf[BF(opcode)] = cc;                      \
        } else {                                            \
            t.tfld = match ? fld_max : 0;                   \
        }                                                   \
        match = 0;                                          \
    }                                                       \
    if (!scrf) {                                            \
        *xt = t;                                            \
    }                                                       \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3296 
/*
 * xststdcsp: test the double-precision operand against the data-class mask
 * as if it were a single-precision value.  Like the xststdcdp case of
 * VSX_TEST_DC above, but with two extra twists:
 *  - a double whose biased exponent is in (0, 0x381) is below the smallest
 *    normal single (0x381 = 1023 - 126), so it counts as a denormal here;
 *  - CRF_SO is set when the value is not exactly representable in single
 *    precision (round-trip through helper_tosingle changes it).
 */
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
{
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;
    float64 arg = xb->VsrD(0);
    float64 arg_sp;

    dcmx = DCMX(opcode);
    exp = (arg >> 52) & 0x7FF;
    sign = float64_is_neg(arg);

    if (float64_is_any_nan(arg)) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(arg)) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(arg)) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(arg) || (exp > 0 && exp < 0x381)) {
        match = extract32(dcmx, 0 + !sign, 1);
    }

    /* Round-trip to single and back; inexact round-trip => not_sp. */
    arg_sp = helper_todouble(helper_tosingle(arg));
    not_sp = arg != arg_sp;

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~FP_FPCC;
    env->fpscr |= cc << FPSCR_FPCC;
    env->crf[BF(opcode)] = cc;
}
3326 
/*
 * xsrqpi / xsrqpix: round quad-precision to integral value.
 * R (r) and RMC (rmc) select the rounding mode; EX (ex, the Rc field)
 * distinguishes xsrqpix, which is allowed to set the inexact flag.
 */
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
                   ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    helper_reset_fpstatus(env);

    /* Decode rounding mode: R=0 selects ties-away or the FPSCR mode,
       R=1 selects one of the four directed/nearest modes via RMC. */
    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round on a scratch status so flags can be filtered before merging. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    t.f128 = float128_round_to_int(xb->f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
    }

    /* Plain xsrqpi (ex == 0) must not report XX (inexact). */
    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, t.f128);
    do_float_check_status(env, true, GETPC());
    /*
     * NOTE(review): the result is stored after the status check here,
     * whereas helper_xsrqpxp stores before checking — confirm whether the
     * difference in behavior on a trapped exception is intentional.
     */
    *xt = t;
}
3380 
/*
 * xsrqpxp: round quad-precision to double-extended (80-bit) precision,
 * by converting to floatx80 and back.  R/RMC select the rounding mode
 * exactly as in helper_xsrqpi above.
 */
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
                    ppc_vsr_t *xt, ppc_vsr_t *xb)
{
    ppc_vsr_t t = { };
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    helper_reset_fpstatus(env);

    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = env->fpscr & FP_RN;
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round to 64-bit significand via floatx80, then widen back. */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb->f128, &tstat);
    t.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
        float_invalid_op_vxsnan(env, GETPC());
        t.f128 = float128_snan_to_qnan(t.f128);
    }

    helper_compute_fprf_float128(env, t.f128);
    *xt = t;
    do_float_check_status(env, true, GETPC());
}
3432 
3433 void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
3434                      ppc_vsr_t *xt, ppc_vsr_t *xb)
3435 {
3436     ppc_vsr_t t = { };
3437     float_status tstat;
3438 
3439     helper_reset_fpstatus(env);
3440 
3441     tstat = env->fp_status;
3442     if (unlikely(Rc(opcode) != 0)) {
3443         tstat.float_rounding_mode = float_round_to_odd;
3444     }
3445 
3446     set_float_exception_flags(0, &tstat);
3447     t.f128 = float128_sqrt(xb->f128, &tstat);
3448     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3449 
3450     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3451         float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC());
3452     }
3453 
3454     helper_compute_fprf_float128(env, t.f128);
3455     *xt = t;
3456     do_float_check_status(env, true, GETPC());
3457 }
3458 
3459 void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
3460                     ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb)
3461 {
3462     ppc_vsr_t t = *xt;
3463     float_status tstat;
3464 
3465     helper_reset_fpstatus(env);
3466 
3467     tstat = env->fp_status;
3468     if (unlikely(Rc(opcode) != 0)) {
3469         tstat.float_rounding_mode = float_round_to_odd;
3470     }
3471 
3472     set_float_exception_flags(0, &tstat);
3473     t.f128 = float128_sub(xa->f128, xb->f128, &tstat);
3474     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3475 
3476     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3477         float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
3478     }
3479 
3480     helper_compute_fprf_float128(env, t.f128);
3481     *xt = t;
3482     do_float_check_status(env, true, GETPC());
3483 }
3484 
static inline void vsxger_excp(CPUPPCState *env, uintptr_t retaddr)
{
    /*
     * XV*GER instructions execute and set the FPSCR as if exceptions
     * are disabled and only at the end throw an exception
     */
    target_ulong enable;
    /* Save and clear the exception-enable (and FI/FR) bits ... */
    enable = env->fpscr & (FP_ENABLES | FP_FI | FP_FR);
    env->fpscr &= ~(FP_ENABLES | FP_FI | FP_FR);
    int status = get_float_exception_flags(&env->fp_status);
    if (unlikely(status & float_flag_invalid)) {
        if (status & float_flag_invalid_snan) {
            float_invalid_op_vxsnan(env, 0);
        }
        if (status & float_flag_invalid_imz) {
            float_invalid_op_vximz(env, false, 0);
        }
        if (status & float_flag_invalid_isi) {
            float_invalid_op_vxisi(env, false, 0);
        }
    }
    /* ... record accumulated status with traps suppressed ... */
    do_float_check_status(env, false, retaddr);
    /* ... then restore the enables and raise any now-pending exception. */
    env->fpscr |= enable;
    do_fpscr_check_status(env, retaddr);
}
3510 
/* Callback type: widen a 16-bit float lane to float64 for vsxger16. */
typedef float64 extract_f16(float16, float_status *);

/* IEEE half precision -> float64 (ieee=true selects IEEE semantics). */
static float64 extract_hf16(float16 in, float_status *fp_status)
{
    return float16_to_float64(in, true, fp_status);
}

/* bfloat16 -> float64 widening. */
static float64 extract_bf16(bfloat16 in, float_status *fp_status)
{
    return bfloat16_to_float64(in, fp_status);
}
3522 
/*
 * Common body of the 16-bit XV*GER2* instructions: a 4x4 grid of rank-2
 * (two-lane) dot products.  xmsk/ymsk enable rows/columns; pmsk bit 1
 * enables the even lane (element 2*i) and bit 0 the odd lane, with
 * disabled lanes contributing +0.0.  acc accumulates into the target,
 * neg_mul/neg_acc negate the product/accumulator (NaN-preserving, via
 * bfp32_neg).  'extract' widens each 16-bit lane (half or bfloat16).
 */
static void vsxger16(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t  *at, uint32_t mask, bool acc,
                     bool neg_mul, bool neg_acc, extract_f16 extract)
{
    float32 r, aux_acc;
    float64 psum, va, vb, vc, vd;
    int i, j, xmsk_bit, ymsk_bit;
    uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
            xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
            ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
    float_status *excp_ptr = &env->fp_status;
    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
                va = !(pmsk & 2) ? float64_zero :
                                   extract(a->VsrHF(2 * i), excp_ptr);
                vb = !(pmsk & 2) ? float64_zero :
                                   extract(b->VsrHF(2 * j), excp_ptr);
                vc = !(pmsk & 1) ? float64_zero :
                                   extract(a->VsrHF(2 * i + 1), excp_ptr);
                vd = !(pmsk & 1) ? float64_zero :
                                   extract(b->VsrHF(2 * j + 1), excp_ptr);
                /* va*vb computed exactly in float64, then vc*vd fused in
                   with float64r32 (round-to-single-range) semantics. */
                psum = float64_mul(va, vb, excp_ptr);
                psum = float64r32_muladd(vc, vd, psum, 0, excp_ptr);
                r = float64_to_float32(psum, excp_ptr);
                if (acc) {
                    aux_acc = at[i].VsrSF(j);
                    if (neg_mul) {
                        r = bfp32_neg(r);
                    }
                    if (neg_acc) {
                        aux_acc = bfp32_neg(aux_acc);
                    }
                    r = float32_add(r, aux_acc, excp_ptr);
                }
                at[i].VsrSF(j) = r;
            } else {
                /* Masked-off entries are zeroed, not left unchanged. */
                at[i].VsrSF(j) = float32_zero;
            }
        }
    }
    vsxger_excp(env, GETPC());
}
3566 
/* Callback type: zero accumulator entry (row i, column j). */
typedef void vsxger_zero(ppc_vsr_t *at, int, int);

/* Callback type: multiply (and optionally accumulate) one grid entry. */
typedef void vsxger_muladd_f(ppc_vsr_t *, ppc_vsr_t *, ppc_vsr_t *, int, int,
                             int flags, float_status *s);

/* Single precision: fused multiply-accumulate into at[i].VsrSF(j). */
static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                            int j, int flags, float_status *s)
{
    at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j),
                                    at[i].VsrSF(j), flags, s);
}

/* Single precision: plain multiply into at[i].VsrSF(j). */
static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                         int j, int flags, float_status *s)
{
    at[i].VsrSF(j) = float32_zero;
}
3589 
/*
 * Double precision callbacks: only two doubleword columns exist per row,
 * so j < 2 is a no-op and j in {2, 3} maps to column j - 2; the row's
 * source doubleword comes from a[i / 2].VsrDF(i % 2).  NOTE(review):
 * presumably this matches the ymsk encoding used by the f64 GER decode —
 * confirm against the caller/translator.
 */
static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                            int j, int flags, float_status *s)
{
    if (j >= 2) {
        j -= 2;
        at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j),
                                        at[i].VsrDF(j), flags, s);
    }
}

/* Double precision: plain multiply (see column mapping note above). */
static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
                         int j, int flags, float_status *s)
{
    if (j >= 2) {
        j -= 2;
        at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s);
    }
}

/* Double precision: zero one accumulator entry (see mapping note above). */
static void vsxger_zero64(ppc_vsr_t *at, int i, int j)
{
    if (j >= 2) {
        j -= 2;
        at[i].VsrDF(j) = float64_zero;
    }
}
3616 
/*
 * Common body of the 32/64-bit XV*GER* instructions: walk the 4x4 grid,
 * dispatching each enabled (xmsk row, ymsk column) entry to 'muladd'
 * (when acc) or 'mul', and each disabled entry to 'zero'.  The
 * neg_mul/neg_acc combination is folded into softfloat muladd flags:
 * negate_c when exactly one of them is set, negate_result when neg_mul.
 */
static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                   ppc_acc_t  *at, uint32_t mask, bool acc, bool neg_mul,
                   bool neg_acc, vsxger_muladd_f mul, vsxger_muladd_f muladd,
                   vsxger_zero zero)
{
    int i, j, xmsk_bit, ymsk_bit, op_flags;
    uint8_t xmsk = mask & 0x0F;
    uint8_t ymsk = (mask >> 4) & 0x0F;
    float_status *excp_ptr = &env->fp_status;
    op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0;
    op_flags |= (neg_mul) ? float_muladd_negate_result : 0;
    helper_reset_fpstatus(env);
    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
                if (acc) {
                    muladd(at, a, b, i, j, op_flags, excp_ptr);
                } else {
                    mul(at, a, b, i, j, op_flags, excp_ptr);
                }
            } else {
                zero(at, i, j);
            }
        }
    }
    vsxger_excp(env, GETPC());
}
3644 
QEMU_FLATTEN
void helper_XVBF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16 GER: multiply only (no accumulate, no negation). */
    vsxger16(env, a, b, at, mask, false, false, false, extract_bf16);
}
3651 
QEMU_FLATTEN
void helper_XVBF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16 GER: positive multiply, positive accumulate. */
    vsxger16(env, a, b, at, mask, true, false, false, extract_bf16);
}
3658 
QEMU_FLATTEN
void helper_XVBF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16 GER: positive multiply, negative accumulate. */
    vsxger16(env, a, b, at, mask, true, false, true, extract_bf16);
}
3665 
QEMU_FLATTEN
void helper_XVBF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16 GER: negative multiply, positive accumulate. */
    vsxger16(env, a, b, at, mask, true, true, false, extract_bf16);
}
3672 
QEMU_FLATTEN
void helper_XVBF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                         ppc_acc_t *at, uint32_t mask)
{
    /* bfloat16 GER: negative multiply, negative accumulate. */
    vsxger16(env, a, b, at, mask, true, true, true, extract_bf16);
}
3679 
QEMU_FLATTEN
void helper_XVF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    /* IEEE half-precision GER: multiply only (no accumulate, no negation). */
    vsxger16(env, a, b, at, mask, false, false, false, extract_hf16);
}
3686 
QEMU_FLATTEN
void helper_XVF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* IEEE half-precision GER: positive multiply, positive accumulate. */
    vsxger16(env, a, b, at, mask, true, false, false, extract_hf16);
}
3693 
QEMU_FLATTEN
void helper_XVF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* IEEE half-precision GER: positive multiply, negative accumulate. */
    vsxger16(env, a, b, at, mask, true, false, true, extract_hf16);
}
3700 
QEMU_FLATTEN
void helper_XVF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* IEEE half-precision GER: negative multiply, positive accumulate. */
    vsxger16(env, a, b, at, mask, true, true, false, extract_hf16);
}
3707 
QEMU_FLATTEN
void helper_XVF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                        ppc_acc_t *at, uint32_t mask)
{
    /* IEEE half-precision GER: negative multiply, negative accumulate. */
    vsxger16(env, a, b, at, mask, true, true, true, extract_hf16);
}
3714 
QEMU_FLATTEN
void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    /* Single-precision GER: multiply only (no accumulate, no negation). */
    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3722 
QEMU_FLATTEN
void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Single-precision GER: positive multiply, positive accumulate. */
    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3730 
QEMU_FLATTEN
void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Single-precision GER: positive multiply, negative accumulate. */
    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3738 
QEMU_FLATTEN
void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Single-precision GER: negative multiply, positive accumulate. */
    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3746 
QEMU_FLATTEN
void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Single-precision GER: negative multiply, negative accumulate. */
    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32,
           vsxger_muladd32, vsxger_zero32);
}
3754 
QEMU_FLATTEN
void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                     ppc_acc_t *at, uint32_t mask)
{
    /* Double-precision GER: multiply only (no accumulate, no negation). */
    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3762 
QEMU_FLATTEN
void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Double-precision GER: positive multiply, positive accumulate. */
    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3770 
QEMU_FLATTEN
void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Double-precision GER: positive multiply, negative accumulate. */
    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3778 
QEMU_FLATTEN
void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Double-precision GER: negative multiply, positive accumulate. */
    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3786 
QEMU_FLATTEN
void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
                       ppc_acc_t *at, uint32_t mask)
{
    /* Double-precision GER: negative multiply, negative accumulate. */
    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64,
           vsxger_muladd64, vsxger_zero64);
}
3794