xref: /openbmc/qemu/target/ppc/fpu_helper.c (revision 7ea6e0671754330510bcdde2e7f5b5f2db426472)
1 /*
2  *  PowerPC floating point and SPE emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "exec/helper-proto.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 
25 static inline float128 float128_snan_to_qnan(float128 x)
26 {
27     float128 r;
28 
29     r.high = x.high | 0x0000800000000000;
30     r.low = x.low;
31     return r;
32 }
33 
34 #define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
35 #define float32_snan_to_qnan(x) ((x) | 0x00400000)
36 #define float16_snan_to_qnan(x) ((x) | 0x0200)
37 
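/*
 * Quieting a signaling NaN only requires setting the most-significant
 * fraction bit; the payload bits are preserved.  For example, the float32
 * sNaN 0x7F800001 becomes the qNaN 0x7FC00001 once 0x00400000 is OR-ed in.
 */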
38 /*****************************************************************************/
39 /* Floating point operations helpers */
40 uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
41 {
42     CPU_FloatU f;
43     CPU_DoubleU d;
44 
45     f.l = arg;
46     d.d = float32_to_float64(f.f, &env->fp_status);
47     return d.ll;
48 }
49 
50 uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
51 {
52     CPU_FloatU f;
53     CPU_DoubleU d;
54 
55     d.ll = arg;
56     f.f = float64_to_float32(d.d, &env->fp_status);
57     return f.l;
58 }
59 
60 static inline int ppc_float32_get_unbiased_exp(float32 f)
61 {
62     return ((f >> 23) & 0xFF) - 127;
63 }
64 
65 static inline int ppc_float64_get_unbiased_exp(float64 f)
66 {
67     return ((f >> 52) & 0x7FF) - 1023;
68 }
69 
70 #define COMPUTE_FPRF(tp)                                       \
71 void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
72 {                                                              \
73     int isneg;                                                 \
74     int fprf;                                                  \
75                                                                \
76     isneg = tp##_is_neg(arg);                                  \
77     if (unlikely(tp##_is_any_nan(arg))) {                      \
78         if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
79             /* Signaling NaN: flags are undefined */           \
80             fprf = 0x00;                                       \
81         } else {                                               \
82             /* Quiet NaN */                                    \
83             fprf = 0x11;                                       \
84         }                                                      \
85     } else if (unlikely(tp##_is_infinity(arg))) {              \
86         /* +/- infinity */                                     \
87         if (isneg) {                                           \
88             fprf = 0x09;                                       \
89         } else {                                               \
90             fprf = 0x05;                                       \
91         }                                                      \
92     } else {                                                   \
93         if (tp##_is_zero(arg)) {                               \
94             /* +/- zero */                                     \
95             if (isneg) {                                       \
96                 fprf = 0x12;                                   \
97             } else {                                           \
98                 fprf = 0x02;                                   \
99             }                                                  \
100         } else {                                               \
101             if (tp##_is_zero_or_denormal(arg)) {               \
102                 /* Denormalized numbers */                     \
103                 fprf = 0x10;                                   \
104             } else {                                           \
105                 /* Normalized numbers */                       \
106                 fprf = 0x00;                                   \
107             }                                                  \
108             if (isneg) {                                       \
109                 fprf |= 0x08;                                  \
110             } else {                                           \
111                 fprf |= 0x04;                                  \
112             }                                                  \
113         }                                                      \
114     }                                                          \
115     /* We update FPSCR_FPRF */                                 \
116     env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
117     env->fpscr |= fprf << FPSCR_FPRF;                          \
118 }
119 
120 COMPUTE_FPRF(float16)
121 COMPUTE_FPRF(float32)
122 COMPUTE_FPRF(float64)
123 COMPUTE_FPRF(float128)
124 
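/*
 * FPRF (C || FPCC) values produced above, matching the ISA result class
 * encoding:
 *   0x11 quiet NaN      0x09 -infinity     0x05 +infinity
 *   0x12 -zero          0x02 +zero
 *   0x18 -denormal      0x14 +denormal
 *   0x08 -normal        0x04 +normal
 * A signaling NaN leaves the field at 0x00 since its flags are undefined.
 */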
125 /* Floating-point invalid operations exception */
126 static inline __attribute__((__always_inline__))
127 uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
128 {
129     CPUState *cs = CPU(ppc_env_get_cpu(env));
130     uint64_t ret = 0;
131     int ve;
132 
133     ve = fpscr_ve;
134     switch (op) {
135     case POWERPC_EXCP_FP_VXSNAN:
136         env->fpscr |= 1 << FPSCR_VXSNAN;
137         break;
138     case POWERPC_EXCP_FP_VXSOFT:
139         env->fpscr |= 1 << FPSCR_VXSOFT;
140         break;
141     case POWERPC_EXCP_FP_VXISI:
142         /* Magnitude subtraction of infinities */
143         env->fpscr |= 1 << FPSCR_VXISI;
144         goto update_arith;
145     case POWERPC_EXCP_FP_VXIDI:
146         /* Division of infinity by infinity */
147         env->fpscr |= 1 << FPSCR_VXIDI;
148         goto update_arith;
149     case POWERPC_EXCP_FP_VXZDZ:
150         /* Division of zero by zero */
151         env->fpscr |= 1 << FPSCR_VXZDZ;
152         goto update_arith;
153     case POWERPC_EXCP_FP_VXIMZ:
154         /* Multiplication of zero by infinity */
155         env->fpscr |= 1 << FPSCR_VXIMZ;
156         goto update_arith;
157     case POWERPC_EXCP_FP_VXVC:
158         /* Ordered comparison of NaN */
159         env->fpscr |= 1 << FPSCR_VXVC;
160         if (set_fpcc) {
161             env->fpscr &= ~(0xF << FPSCR_FPCC);
162             env->fpscr |= 0x11 << FPSCR_FPCC;
163         }
164         /* We must update the target FPR before raising the exception */
165         if (ve != 0) {
166             cs->exception_index = POWERPC_EXCP_PROGRAM;
167             env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
168             /* Update the floating-point enabled exception summary */
169             env->fpscr |= 1 << FPSCR_FEX;
170             /* Exception is deferred */
171             ve = 0;
172         }
173         break;
174     case POWERPC_EXCP_FP_VXSQRT:
175         /* Square root of a negative number */
176         env->fpscr |= 1 << FPSCR_VXSQRT;
177     update_arith:
178         env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
179         if (ve == 0) {
180             /* Set the result to quiet NaN */
181             ret = 0x7FF8000000000000ULL;
182             if (set_fpcc) {
183                 env->fpscr &= ~(0xF << FPSCR_FPCC);
184                 env->fpscr |= 0x11 << FPSCR_FPCC;
185             }
186         }
187         break;
188     case POWERPC_EXCP_FP_VXCVI:
189         /* Invalid conversion */
190         env->fpscr |= 1 << FPSCR_VXCVI;
191         env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
192         if (ve == 0) {
193             /* Set the result to quiet NaN */
194             ret = 0x7FF8000000000000ULL;
195             if (set_fpcc) {
196                 env->fpscr &= ~(0xF << FPSCR_FPCC);
197                 env->fpscr |= 0x11 << FPSCR_FPCC;
198             }
199         }
200         break;
201     }
202     /* Update the floating-point invalid operation summary */
203     env->fpscr |= 1 << FPSCR_VX;
204     /* Update the floating-point exception summary */
205     env->fpscr |= FP_FX;
206     if (ve != 0) {
207         /* Update the floating-point enabled exception summary */
208         env->fpscr |= 1 << FPSCR_FEX;
209         if (msr_fe0 != 0 || msr_fe1 != 0) {
210             /* GETPC() works here because this is inline */
211             raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
212                                    POWERPC_EXCP_FP | op, GETPC());
213         }
214     }
215     return ret;
216 }
217 
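/*
 * float_invalid_op_excp() sets the per-cause VX* bit plus the VX and FX
 * summaries.  When the exception is disabled (VE=0) the arithmetic cases
 * return the default quiet NaN 0x7FF8000000000000 for the target FPR;
 * when VE=1 and MSR[FE0|FE1] is non-zero a program interrupt is raised.
 */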
218 static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
219 {
220     env->fpscr |= 1 << FPSCR_ZX;
221     env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
222     /* Update the floating-point exception summary */
223     env->fpscr |= FP_FX;
224     if (fpscr_ze != 0) {
225         /* Update the floating-point enabled exception summary */
226         env->fpscr |= 1 << FPSCR_FEX;
227         if (msr_fe0 != 0 || msr_fe1 != 0) {
228             raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
229                                    POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
230                                    raddr);
231         }
232     }
233 }
234 
235 static inline void float_overflow_excp(CPUPPCState *env)
236 {
237     CPUState *cs = CPU(ppc_env_get_cpu(env));
238 
239     env->fpscr |= 1 << FPSCR_OX;
240     /* Update the floating-point exception summary */
241     env->fpscr |= FP_FX;
242     if (fpscr_oe != 0) {
243         /* XXX: should adjust the result */
244         /* Update the floating-point enabled exception summary */
245         env->fpscr |= 1 << FPSCR_FEX;
246         /* We must update the target FPR before raising the exception */
247         cs->exception_index = POWERPC_EXCP_PROGRAM;
248         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
249     } else {
250         env->fpscr |= 1 << FPSCR_XX;
251         env->fpscr |= 1 << FPSCR_FI;
252     }
253 }
254 
255 static inline void float_underflow_excp(CPUPPCState *env)
256 {
257     CPUState *cs = CPU(ppc_env_get_cpu(env));
258 
259     env->fpscr |= 1 << FPSCR_UX;
260     /* Update the floating-point exception summary */
261     env->fpscr |= FP_FX;
262     if (fpscr_ue != 0) {
263         /* XXX: should adjust the result */
264         /* Update the floating-point enabled exception summary */
265         env->fpscr |= 1 << FPSCR_FEX;
266         /* We must update the target FPR before raising the exception */
267         cs->exception_index = POWERPC_EXCP_PROGRAM;
268         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
269     }
270 }
271 
272 static inline void float_inexact_excp(CPUPPCState *env)
273 {
274     CPUState *cs = CPU(ppc_env_get_cpu(env));
275 
276     env->fpscr |= 1 << FPSCR_XX;
277     /* Update the floating-point exception summary */
278     env->fpscr |= FP_FX;
279     if (fpscr_xe != 0) {
280         /* Update the floating-point enabled exception summary */
281         env->fpscr |= 1 << FPSCR_FEX;
282         /* We must update the target FPR before raising the exception */
283         cs->exception_index = POWERPC_EXCP_PROGRAM;
284         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
285     }
286 }
287 
288 static inline void fpscr_set_rounding_mode(CPUPPCState *env)
289 {
290     int rnd_type;
291 
292     /* Set rounding mode */
293     switch (fpscr_rn) {
294     case 0:
295         /* Best approximation (round to nearest) */
296         rnd_type = float_round_nearest_even;
297         break;
298     case 1:
299         /* Smaller magnitude (round toward zero) */
300         rnd_type = float_round_to_zero;
301         break;
302     case 2:
303         /* Round toward +infinity */
304         rnd_type = float_round_up;
305         break;
306     default:
307     case 3:
308         /* Round toward -infinity */
309         rnd_type = float_round_down;
310         break;
311     }
312     set_float_rounding_mode(rnd_type, &env->fp_status);
313 }
314 
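/*
 * FPSCR[RN] to softfloat rounding-mode mapping used above:
 *   00 -> float_round_nearest_even    01 -> float_round_to_zero
 *   10 -> float_round_up              11 -> float_round_down
 */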
315 void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
316 {
317     int prev;
318 
319     prev = (env->fpscr >> bit) & 1;
320     env->fpscr &= ~(1 << bit);
321     if (prev == 1) {
322         switch (bit) {
323         case FPSCR_RN1:
324         case FPSCR_RN:
325             fpscr_set_rounding_mode(env);
326             break;
327         default:
328             break;
329         }
330     }
331 }
332 
333 void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
334 {
335     CPUState *cs = CPU(ppc_env_get_cpu(env));
336     int prev;
337 
338     prev = (env->fpscr >> bit) & 1;
339     env->fpscr |= 1 << bit;
340     if (prev == 0) {
341         switch (bit) {
342         case FPSCR_VX:
343             env->fpscr |= FP_FX;
344             if (fpscr_ve) {
345                 goto raise_ve;
346             }
347             break;
348         case FPSCR_OX:
349             env->fpscr |= FP_FX;
350             if (fpscr_oe) {
351                 goto raise_oe;
352             }
353             break;
354         case FPSCR_UX:
355             env->fpscr |= FP_FX;
356             if (fpscr_ue) {
357                 goto raise_ue;
358             }
359             break;
360         case FPSCR_ZX:
361             env->fpscr |= FP_FX;
362             if (fpscr_ze) {
363                 goto raise_ze;
364             }
365             break;
366         case FPSCR_XX:
367             env->fpscr |= FP_FX;
368             if (fpscr_xe) {
369                 goto raise_xe;
370             }
371             break;
372         case FPSCR_VXSNAN:
373         case FPSCR_VXISI:
374         case FPSCR_VXIDI:
375         case FPSCR_VXZDZ:
376         case FPSCR_VXIMZ:
377         case FPSCR_VXVC:
378         case FPSCR_VXSOFT:
379         case FPSCR_VXSQRT:
380         case FPSCR_VXCVI:
381             env->fpscr |= 1 << FPSCR_VX;
382             env->fpscr |= FP_FX;
383             if (fpscr_ve != 0) {
384                 goto raise_ve;
385             }
386             break;
387         case FPSCR_VE:
388             if (fpscr_vx != 0) {
389             raise_ve:
390                 env->error_code = POWERPC_EXCP_FP;
391                 if (fpscr_vxsnan) {
392                     env->error_code |= POWERPC_EXCP_FP_VXSNAN;
393                 }
394                 if (fpscr_vxisi) {
395                     env->error_code |= POWERPC_EXCP_FP_VXISI;
396                 }
397                 if (fpscr_vxidi) {
398                     env->error_code |= POWERPC_EXCP_FP_VXIDI;
399                 }
400                 if (fpscr_vxzdz) {
401                     env->error_code |= POWERPC_EXCP_FP_VXZDZ;
402                 }
403                 if (fpscr_vximz) {
404                     env->error_code |= POWERPC_EXCP_FP_VXIMZ;
405                 }
406                 if (fpscr_vxvc) {
407                     env->error_code |= POWERPC_EXCP_FP_VXVC;
408                 }
409                 if (fpscr_vxsoft) {
410                     env->error_code |= POWERPC_EXCP_FP_VXSOFT;
411                 }
412                 if (fpscr_vxsqrt) {
413                     env->error_code |= POWERPC_EXCP_FP_VXSQRT;
414                 }
415                 if (fpscr_vxcvi) {
416                     env->error_code |= POWERPC_EXCP_FP_VXCVI;
417                 }
418                 goto raise_excp;
419             }
420             break;
421         case FPSCR_OE:
422             if (fpscr_ox != 0) {
423             raise_oe:
424                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
425                 goto raise_excp;
426             }
427             break;
428         case FPSCR_UE:
429             if (fpscr_ux != 0) {
430             raise_ue:
431                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
432                 goto raise_excp;
433             }
434             break;
435         case FPSCR_ZE:
436             if (fpscr_zx != 0) {
437             raise_ze:
438                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
439                 goto raise_excp;
440             }
441             break;
442         case FPSCR_XE:
443             if (fpscr_xx != 0) {
444             raise_xe:
445                 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
446                 goto raise_excp;
447             }
448             break;
449         case FPSCR_RN1:
450         case FPSCR_RN:
451             fpscr_set_rounding_mode(env);
452             break;
453         default:
454             break;
455         raise_excp:
456             /* Update the floating-point enabled exception summary */
457             env->fpscr |= 1 << FPSCR_FEX;
458             /* We have to update Rc1 before raising the exception */
459             cs->exception_index = POWERPC_EXCP_PROGRAM;
460             break;
461         }
462     }
463 }
464 
465 void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
466 {
467     CPUState *cs = CPU(ppc_env_get_cpu(env));
468     target_ulong prev, new;
469     int i;
470 
471     prev = env->fpscr;
472     new = (target_ulong)arg;
473     new &= ~0x60000000LL;
474     new |= prev & 0x60000000LL;
475     for (i = 0; i < sizeof(target_ulong) * 2; i++) {
476         if (mask & (1 << i)) {
477             env->fpscr &= ~(0xFLL << (4 * i));
478             env->fpscr |= new & (0xFLL << (4 * i));
479         }
480     }
481     /* Update VX and FEX */
482     if (fpscr_ix != 0) {
483         env->fpscr |= 1 << FPSCR_VX;
484     } else {
485         env->fpscr &= ~(1 << FPSCR_VX);
486     }
487     if ((fpscr_ex & fpscr_eex) != 0) {
488         env->fpscr |= 1 << FPSCR_FEX;
489         cs->exception_index = POWERPC_EXCP_PROGRAM;
490         /* XXX: we should compute it properly */
491         env->error_code = POWERPC_EXCP_FP;
492     } else {
493         env->fpscr &= ~(1 << FPSCR_FEX);
494     }
495     fpscr_set_rounding_mode(env);
496 }
497 
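/*
 * Each bit i of 'mask' selects the 4-bit FPSCR field at bits 4*i..4*i+3.
 * FEX and VX (the 0x60000000 bits) are never copied from the source value;
 * they are recomputed from the individual exception and enable bits.
 */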
498 void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
499 {
500     helper_store_fpscr(env, arg, mask);
501 }
502 
503 static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
504 {
505     CPUState *cs = CPU(ppc_env_get_cpu(env));
506     int status = get_float_exception_flags(&env->fp_status);
507 
508     if (status & float_flag_divbyzero) {
509         float_zero_divide_excp(env, raddr);
510     } else if (status & float_flag_overflow) {
511         float_overflow_excp(env);
512     } else if (status & float_flag_underflow) {
513         float_underflow_excp(env);
514     } else if (status & float_flag_inexact) {
515         float_inexact_excp(env);
516     }
517 
518     if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
519         (env->error_code & POWERPC_EXCP_FP)) {
520         /* Deferred floating-point exception after target FPR update */
521         if (msr_fe0 != 0 || msr_fe1 != 0) {
522             raise_exception_err_ra(env, cs->exception_index,
523                                    env->error_code, raddr);
524         }
525     }
526 }
527 
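/*
 * do_float_check_status() folds the accumulated softfloat flags into the
 * FPSCR and only raises the deferred program interrupt when MSR[FE0] or
 * MSR[FE1] enables floating-point exceptions, after the FPR was updated.
 */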
528 static inline __attribute__((__always_inline__))
529 void float_check_status(CPUPPCState *env)
530 {
531     /* GETPC() works here because this is inline */
532     do_float_check_status(env, GETPC());
533 }
534 
535 void helper_float_check_status(CPUPPCState *env)
536 {
537     do_float_check_status(env, GETPC());
538 }
539 
540 void helper_reset_fpstatus(CPUPPCState *env)
541 {
542     set_float_exception_flags(0, &env->fp_status);
543 }
544 
545 /* fadd - fadd. */
546 uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
547 {
548     CPU_DoubleU farg1, farg2;
549 
550     farg1.ll = arg1;
551     farg2.ll = arg2;
552 
553     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
554                  float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
555         /* Magnitude subtraction of infinities */
556         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
557     } else {
558         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
559                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
560             /* sNaN addition */
561             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
562         }
563         farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
564     }
565 
566     return farg1.ll;
567 }
568 
569 /* fsub - fsub. */
570 uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
571 {
572     CPU_DoubleU farg1, farg2;
573 
574     farg1.ll = arg1;
575     farg2.ll = arg2;
576 
577     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
578                  float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
579         /* Magnitude subtraction of infinities */
580         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
581     } else {
582         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
583                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
584             /* sNaN subtraction */
585             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
586         }
587         farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
588     }
589 
590     return farg1.ll;
591 }
592 
593 /* fmul - fmul. */
594 uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
595 {
596     CPU_DoubleU farg1, farg2;
597 
598     farg1.ll = arg1;
599     farg2.ll = arg2;
600 
601     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
602                  (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
603         /* Multiplication of zero by infinity */
604         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
605     } else {
606         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
607                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
608             /* sNaN multiplication */
609             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
610         }
611         farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
612     }
613 
614     return farg1.ll;
615 }
616 
617 /* fdiv - fdiv. */
618 uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
619 {
620     CPU_DoubleU farg1, farg2;
621 
622     farg1.ll = arg1;
623     farg2.ll = arg2;
624 
625     if (unlikely(float64_is_infinity(farg1.d) &&
626                  float64_is_infinity(farg2.d))) {
627         /* Division of infinity by infinity */
628         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
629     } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
630         /* Division of zero by zero */
631         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
632     } else {
633         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
634                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
635             /* sNaN division */
636             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
637         }
638         farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
639     }
640 
641     return farg1.ll;
642 }
643 
644 
645 #define FPU_FCTI(op, cvt, nanval)                                      \
646 uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
647 {                                                                      \
648     CPU_DoubleU farg;                                                  \
649                                                                        \
650     farg.ll = arg;                                                     \
651     farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
652                                                                        \
653     if (unlikely(env->fp_status.float_exception_flags)) {              \
654         if (float64_is_any_nan(arg)) {                                 \
655             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
656             if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
657                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
658             }                                                          \
659             farg.ll = nanval;                                          \
660         } else if (env->fp_status.float_exception_flags &              \
661                    float_flag_invalid) {                               \
662             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
663         }                                                              \
664         float_check_status(env);                                       \
665     }                                                                  \
666     return farg.ll;                                                    \
667  }
668 
669 FPU_FCTI(fctiw, int32, 0x80000000U)
670 FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
671 FPU_FCTI(fctiwu, uint32, 0x00000000U)
672 FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
673 FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
674 FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
675 FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
676 FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
677 
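/*
 * 'nanval' is the value stored when the source is a NaN: signed
 * conversions saturate to the minimum signed integer and unsigned
 * conversions to zero, e.g. fctiw of a NaN yields 0x80000000.
 */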
678 #define FPU_FCFI(op, cvtr, is_single)                      \
679 uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
680 {                                                          \
681     CPU_DoubleU farg;                                      \
682                                                            \
683     if (is_single) {                                       \
684         float32 tmp = cvtr(arg, &env->fp_status);          \
685         farg.d = float32_to_float64(tmp, &env->fp_status); \
686     } else {                                               \
687         farg.d = cvtr(arg, &env->fp_status);               \
688     }                                                      \
689     float_check_status(env);                               \
690     return farg.ll;                                        \
691 }
692 
693 FPU_FCFI(fcfid, int64_to_float64, 0)
694 FPU_FCFI(fcfids, int64_to_float32, 1)
695 FPU_FCFI(fcfidu, uint64_to_float64, 0)
696 FPU_FCFI(fcfidus, uint64_to_float32, 1)
697 
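/*
 * The single-precision variants (fcfids/fcfidus) round to float32 first
 * and then widen, so the FPR ends up holding a value that is exactly
 * representable in single precision.
 */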
698 static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
699                               int rounding_mode)
700 {
701     CPU_DoubleU farg;
702 
703     farg.ll = arg;
704 
705     if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
706         /* sNaN round */
707         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
708         farg.ll = arg | 0x0008000000000000ULL;
709     } else {
710         int inexact = get_float_exception_flags(&env->fp_status) &
711                       float_flag_inexact;
712         set_float_rounding_mode(rounding_mode, &env->fp_status);
713         farg.ll = float64_round_to_int(farg.d, &env->fp_status);
714         /* Restore rounding mode from FPSCR */
715         fpscr_set_rounding_mode(env);
716 
717         /* fri* does not set FPSCR[XX] */
718         if (!inexact) {
719             env->fp_status.float_exception_flags &= ~float_flag_inexact;
720         }
721     }
722     float_check_status(env);
723     return farg.ll;
724 }
725 
726 uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
727 {
728     return do_fri(env, arg, float_round_ties_away);
729 }
730 
731 uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
732 {
733     return do_fri(env, arg, float_round_to_zero);
734 }
735 
736 uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
737 {
738     return do_fri(env, arg, float_round_up);
739 }
740 
741 uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
742 {
743     return do_fri(env, arg, float_round_down);
744 }
745 
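/*
 * frin/friz/frip/frim round to an integral value using ties-away-from-zero,
 * truncation, +infinity and -infinity respectively; do_fri() restores the
 * FPSCR rounding mode afterwards and avoids setting FPSCR[XX].
 */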
746 static void float64_maddsub_update_excp(CPUPPCState *env, float64 arg1,
747                                         float64 arg2, float64 arg3,
748                                         unsigned int madd_flags)
749 {
750     if (unlikely((float64_is_infinity(arg1) && float64_is_zero(arg2)) ||
751                  (float64_is_zero(arg1) && float64_is_infinity(arg2)))) {
752         /* Multiplication of zero by infinity */
753         arg1 = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
754     } else if (unlikely(float64_is_signaling_nan(arg1, &env->fp_status) ||
755                         float64_is_signaling_nan(arg2, &env->fp_status) ||
756                         float64_is_signaling_nan(arg3, &env->fp_status))) {
757         /* sNaN operation */
758         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
759     } else if ((float64_is_infinity(arg1) || float64_is_infinity(arg2)) &&
760                float64_is_infinity(arg3)) {
761         uint8_t aSign, bSign, cSign;
762 
763         aSign = float64_is_neg(arg1);
764         bSign = float64_is_neg(arg2);
765         cSign = float64_is_neg(arg3);
766         if (madd_flags & float_muladd_negate_c) {
767             cSign ^= 1;
768         }
769         if (aSign ^ bSign ^ cSign) {
770             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
771         }
772     }
773 }
774 
775 #define FPU_FMADD(op, madd_flags)                                       \
776 uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
777                      uint64_t arg2, uint64_t arg3)                      \
778 {                                                                       \
779     uint32_t flags;                                                     \
780     float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
781                                  &env->fp_status);                      \
782     flags = get_float_exception_flags(&env->fp_status);                 \
783     if (flags) {                                                        \
784         if (flags & float_flag_invalid) {                               \
785             float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
786                                         madd_flags);                    \
787         }                                                               \
788         float_check_status(env);                                        \
789     }                                                                   \
790     return ret;                                                         \
791 }
792 
793 #define MADD_FLGS 0
794 #define MSUB_FLGS float_muladd_negate_c
795 #define NMADD_FLGS float_muladd_negate_result
796 #define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
797 
798 FPU_FMADD(fmadd, MADD_FLGS)
799 FPU_FMADD(fnmadd, NMADD_FLGS)
800 FPU_FMADD(fmsub, MSUB_FLGS)
801 FPU_FMADD(fnmsub, NMSUB_FLGS)
802 
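/*
 * The flag combinations above give the four fused forms:
 *   fmadd  = a*b + c          fmsub  = a*b - c
 *   fnmadd = -(a*b + c)       fnmsub = -(a*b - c)
 */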
803 /* frsp - frsp. */
804 uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
805 {
806     CPU_DoubleU farg;
807     float32 f32;
808 
809     farg.ll = arg;
810 
811     if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
812         /* sNaN round to single precision */
813         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
814     }
815     f32 = float64_to_float32(farg.d, &env->fp_status);
816     farg.d = float32_to_float64(f32, &env->fp_status);
817 
818     return farg.ll;
819 }
820 
821 /* fsqrt - fsqrt. */
822 uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
823 {
824     CPU_DoubleU farg;
825 
826     farg.ll = arg;
827 
828     if (unlikely(float64_is_any_nan(farg.d))) {
829         if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
830             /* sNaN square root */
831             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
832             farg.ll = float64_snan_to_qnan(farg.ll);
833         }
834     } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
835         /* Square root of a negative nonzero number */
836         farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
837     } else {
838         farg.d = float64_sqrt(farg.d, &env->fp_status);
839     }
840     return farg.ll;
841 }
842 
843 /* fre - fre. */
844 uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
845 {
846     CPU_DoubleU farg;
847 
848     farg.ll = arg;
849 
850     if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
851         /* sNaN reciprocal */
852         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
853     }
854     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
855     return farg.ll;
856 }
857 
858 /* fres - fres. */
859 uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
860 {
861     CPU_DoubleU farg;
862     float32 f32;
863 
864     farg.ll = arg;
865 
866     if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
867         /* sNaN reciprocal */
868         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
869     }
870     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
871     f32 = float64_to_float32(farg.d, &env->fp_status);
872     farg.d = float32_to_float64(f32, &env->fp_status);
873 
874     return farg.ll;
875 }
876 
877 /* frsqrte  - frsqrte. */
878 uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
879 {
880     CPU_DoubleU farg;
881 
882     farg.ll = arg;
883 
884     if (unlikely(float64_is_any_nan(farg.d))) {
885         if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
886             /* sNaN reciprocal square root */
887             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
888             farg.ll = float64_snan_to_qnan(farg.ll);
889         }
890     } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
891         /* Reciprocal square root of a negative nonzero number */
892         farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
893     } else {
894         farg.d = float64_sqrt(farg.d, &env->fp_status);
895         farg.d = float64_div(float64_one, farg.d, &env->fp_status);
896     }
897 
898     return farg.ll;
899 }
900 
901 /* fsel - fsel. */
902 uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
903                      uint64_t arg3)
904 {
905     CPU_DoubleU farg1;
906 
907     farg1.ll = arg1;
908 
909     if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
910         !float64_is_any_nan(farg1.d)) {
911         return arg2;
912     } else {
913         return arg3;
914     }
915 }
916 
917 uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
918 {
919     int fe_flag = 0;
920     int fg_flag = 0;
921 
922     if (unlikely(float64_is_infinity(fra) ||
923                  float64_is_infinity(frb) ||
924                  float64_is_zero(frb))) {
925         fe_flag = 1;
926         fg_flag = 1;
927     } else {
928         int e_a = ppc_float64_get_unbiased_exp(fra);
929         int e_b = ppc_float64_get_unbiased_exp(frb);
930 
931         if (unlikely(float64_is_any_nan(fra) ||
932                      float64_is_any_nan(frb))) {
933             fe_flag = 1;
934         } else if ((e_b <= -1022) || (e_b >= 1021)) {
935             fe_flag = 1;
936         } else if (!float64_is_zero(fra) &&
937                    (((e_a - e_b) >= 1023) ||
938                     ((e_a - e_b) <= -1021) ||
939                     (e_a <= -970))) {
940             fe_flag = 1;
941         }
942 
943         if (unlikely(float64_is_zero_or_denormal(frb))) {
944             /* XB is not zero because of the above check and */
945             /* so must be denormalized.                      */
946             fg_flag = 1;
947         }
948     }
949 
950     return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
951 }
952 
953 uint32_t helper_ftsqrt(uint64_t frb)
954 {
955     int fe_flag = 0;
956     int fg_flag = 0;
957 
958     if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
959         fe_flag = 1;
960         fg_flag = 1;
961     } else {
962         int e_b = ppc_float64_get_unbiased_exp(frb);
963 
964         if (unlikely(float64_is_any_nan(frb))) {
965             fe_flag = 1;
966         } else if (unlikely(float64_is_zero(frb))) {
967             fe_flag = 1;
968         } else if (unlikely(float64_is_neg(frb))) {
969             fe_flag = 1;
970         } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
971             fe_flag = 1;
972         }
973 
974         if (unlikely(float64_is_zero_or_denormal(frb))) {
975             /* XB is not zero because of the above check and */
976             /* therefore must be denormalized.               */
977             fg_flag = 1;
978         }
979     }
980 
981     return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
982 }
983 
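/*
 * Both test helpers pack their result as 0b1000 | (fg << 2) | (fe << 1):
 * bit 3 is always set, bit 2 carries the fg estimate, bit 1 the fe
 * estimate and bit 0 is always clear.
 */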
984 void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
985                   uint32_t crfD)
986 {
987     CPU_DoubleU farg1, farg2;
988     uint32_t ret = 0;
989 
990     farg1.ll = arg1;
991     farg2.ll = arg2;
992 
993     if (unlikely(float64_is_any_nan(farg1.d) ||
994                  float64_is_any_nan(farg2.d))) {
995         ret = 0x01UL;
996     } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
997         ret = 0x08UL;
998     } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
999         ret = 0x04UL;
1000     } else {
1001         ret = 0x02UL;
1002     }
1003 
1004     env->fpscr &= ~(0x0F << FPSCR_FPRF);
1005     env->fpscr |= ret << FPSCR_FPRF;
1006     env->crf[crfD] = ret;
1007     if (unlikely(ret == 0x01UL
1008                  && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1009                      float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
1010         /* sNaN comparison */
1011         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1012     }
1013 }
1014 
1015 void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
1016                   uint32_t crfD)
1017 {
1018     CPU_DoubleU farg1, farg2;
1019     uint32_t ret = 0;
1020 
1021     farg1.ll = arg1;
1022     farg2.ll = arg2;
1023 
1024     if (unlikely(float64_is_any_nan(farg1.d) ||
1025                  float64_is_any_nan(farg2.d))) {
1026         ret = 0x01UL;
1027     } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1028         ret = 0x08UL;
1029     } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1030         ret = 0x04UL;
1031     } else {
1032         ret = 0x02UL;
1033     }
1034 
1035     env->fpscr &= ~(0x0F << FPSCR_FPRF);
1036     env->fpscr |= ret << FPSCR_FPRF;
1037     env->crf[crfD] = ret;
1038     if (unlikely(ret == 0x01UL)) {
1039         if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
1040             float64_is_signaling_nan(farg2.d, &env->fp_status)) {
1041             /* sNaN comparison */
1042             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
1043                                   POWERPC_EXCP_FP_VXVC, 1);
1044         } else {
1045             /* qNaN comparison */
1046             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
1047         }
1048     }
1049 }
1050 
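/*
 * fcmpu and fcmpo share the 4-bit condition encoding written to both
 * FPSCR[FPCC] and CR[crfD]: 0x08 less than, 0x04 greater than, 0x02 equal,
 * 0x01 unordered.  fcmpo additionally reports VXVC for any NaN operand.
 */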
1051 /* Single-precision floating-point conversions */
1052 static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1053 {
1054     CPU_FloatU u;
1055 
1056     u.f = int32_to_float32(val, &env->vec_status);
1057 
1058     return u.l;
1059 }
1060 
1061 static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1062 {
1063     CPU_FloatU u;
1064 
1065     u.f = uint32_to_float32(val, &env->vec_status);
1066 
1067     return u.l;
1068 }
1069 
1070 static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1071 {
1072     CPU_FloatU u;
1073 
1074     u.l = val;
1075     /* NaNs are not treated the way IEEE 754 specifies */
1076     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1077         return 0;
1078     }
1079 
1080     return float32_to_int32(u.f, &env->vec_status);
1081 }
1082 
1083 static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1084 {
1085     CPU_FloatU u;
1086 
1087     u.l = val;
1088     /* NaNs are not treated the way IEEE 754 specifies */
1089     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1090         return 0;
1091     }
1092 
1093     return float32_to_uint32(u.f, &env->vec_status);
1094 }
1095 
1096 static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1097 {
1098     CPU_FloatU u;
1099 
1100     u.l = val;
1101     /* NaNs are not treated the way IEEE 754 specifies */
1102     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1103         return 0;
1104     }
1105 
1106     return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1107 }
1108 
1109 static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1110 {
1111     CPU_FloatU u;
1112 
1113     u.l = val;
1114     /* NaNs are not treated the way IEEE 754 specifies */
1115     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1116         return 0;
1117     }
1118 
1119     return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1120 }
1121 
1122 static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1123 {
1124     CPU_FloatU u;
1125     float32 tmp;
1126 
1127     u.f = int32_to_float32(val, &env->vec_status);
1128     tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1129     u.f = float32_div(u.f, tmp, &env->vec_status);
1130 
1131     return u.l;
1132 }
1133 
1134 static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1135 {
1136     CPU_FloatU u;
1137     float32 tmp;
1138 
1139     u.f = uint32_to_float32(val, &env->vec_status);
1140     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1141     u.f = float32_div(u.f, tmp, &env->vec_status);
1142 
1143     return u.l;
1144 }
1145 
1146 static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1147 {
1148     CPU_FloatU u;
1149     float32 tmp;
1150 
1151     u.l = val;
1152     /* NaNs are not treated the way IEEE 754 specifies */
1153     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1154         return 0;
1155     }
1156     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1157     u.f = float32_mul(u.f, tmp, &env->vec_status);
1158 
1159     return float32_to_int32(u.f, &env->vec_status);
1160 }
1161 
1162 static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1163 {
1164     CPU_FloatU u;
1165     float32 tmp;
1166 
1167     u.l = val;
1168     /* NaNs are not treated the way IEEE 754 specifies */
1169     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1170         return 0;
1171     }
1172     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1173     u.f = float32_mul(u.f, tmp, &env->vec_status);
1174 
1175     return float32_to_uint32(u.f, &env->vec_status);
1176 }
1177 
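/*
 * The *sf/*uf conversions treat the 32-bit operand as a fixed-point
 * fraction: efscfsf/efscfuf divide by 2^32 after the integer conversion,
 * while efsctsf/efsctuf multiply by 2^32 before converting back.
 */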
1178 #define HELPER_SPE_SINGLE_CONV(name)                              \
1179     uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
1180     {                                                             \
1181         return e##name(env, val);                                 \
1182     }
1183 /* efscfsi */
1184 HELPER_SPE_SINGLE_CONV(fscfsi);
1185 /* efscfui */
1186 HELPER_SPE_SINGLE_CONV(fscfui);
1187 /* efscfuf */
1188 HELPER_SPE_SINGLE_CONV(fscfuf);
1189 /* efscfsf */
1190 HELPER_SPE_SINGLE_CONV(fscfsf);
1191 /* efsctsi */
1192 HELPER_SPE_SINGLE_CONV(fsctsi);
1193 /* efsctui */
1194 HELPER_SPE_SINGLE_CONV(fsctui);
1195 /* efsctsiz */
1196 HELPER_SPE_SINGLE_CONV(fsctsiz);
1197 /* efsctuiz */
1198 HELPER_SPE_SINGLE_CONV(fsctuiz);
1199 /* efsctsf */
1200 HELPER_SPE_SINGLE_CONV(fsctsf);
1201 /* efsctuf */
1202 HELPER_SPE_SINGLE_CONV(fsctuf);
1203 
1204 #define HELPER_SPE_VECTOR_CONV(name)                            \
1205     uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
1206     {                                                           \
1207         return ((uint64_t)e##name(env, val >> 32) << 32) |      \
1208             (uint64_t)e##name(env, val);                        \
1209     }
1210 /* evfscfsi */
1211 HELPER_SPE_VECTOR_CONV(fscfsi);
1212 /* evfscfui */
1213 HELPER_SPE_VECTOR_CONV(fscfui);
1214 /* evfscfuf */
1215 HELPER_SPE_VECTOR_CONV(fscfuf);
1216 /* evfscfsf */
1217 HELPER_SPE_VECTOR_CONV(fscfsf);
1218 /* evfsctsi */
1219 HELPER_SPE_VECTOR_CONV(fsctsi);
1220 /* evfsctui */
1221 HELPER_SPE_VECTOR_CONV(fsctui);
1222 /* evfsctsiz */
1223 HELPER_SPE_VECTOR_CONV(fsctsiz);
1224 /* evfsctuiz */
1225 HELPER_SPE_VECTOR_CONV(fsctuiz);
1226 /* evfsctsf */
1227 HELPER_SPE_VECTOR_CONV(fsctsf);
1228 /* evfsctuf */
1229 HELPER_SPE_VECTOR_CONV(fsctuf);
1230 
1231 /* Single-precision floating-point arithmetic */
1232 static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1233 {
1234     CPU_FloatU u1, u2;
1235 
1236     u1.l = op1;
1237     u2.l = op2;
1238     u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1239     return u1.l;
1240 }
1241 
1242 static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1243 {
1244     CPU_FloatU u1, u2;
1245 
1246     u1.l = op1;
1247     u2.l = op2;
1248     u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1249     return u1.l;
1250 }
1251 
1252 static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1253 {
1254     CPU_FloatU u1, u2;
1255 
1256     u1.l = op1;
1257     u2.l = op2;
1258     u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1259     return u1.l;
1260 }
1261 
1262 static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1263 {
1264     CPU_FloatU u1, u2;
1265 
1266     u1.l = op1;
1267     u2.l = op2;
1268     u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1269     return u1.l;
1270 }
1271 
1272 #define HELPER_SPE_SINGLE_ARITH(name)                                   \
1273     uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1274     {                                                                   \
1275         return e##name(env, op1, op2);                                  \
1276     }
1277 /* efsadd */
1278 HELPER_SPE_SINGLE_ARITH(fsadd);
1279 /* efssub */
1280 HELPER_SPE_SINGLE_ARITH(fssub);
1281 /* efsmul */
1282 HELPER_SPE_SINGLE_ARITH(fsmul);
1283 /* efsdiv */
1284 HELPER_SPE_SINGLE_ARITH(fsdiv);
1285 
1286 #define HELPER_SPE_VECTOR_ARITH(name)                                   \
1287     uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1288     {                                                                   \
1289         return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
1290             (uint64_t)e##name(env, op1, op2);                           \
1291     }
1292 /* evfsadd */
1293 HELPER_SPE_VECTOR_ARITH(fsadd);
1294 /* evfssub */
1295 HELPER_SPE_VECTOR_ARITH(fssub);
1296 /* evfsmul */
1297 HELPER_SPE_VECTOR_ARITH(fsmul);
1298 /* evfsdiv */
1299 HELPER_SPE_VECTOR_ARITH(fsdiv);
1300 
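/*
 * The evfs* vector forms simply apply the scalar efs* operation to the
 * upper and lower 32-bit halves of the 64-bit operands independently.
 */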
1301 /* Single-precision floating-point comparisons */
1302 static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1303 {
1304     CPU_FloatU u1, u2;
1305 
1306     u1.l = op1;
1307     u2.l = op2;
1308     return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1309 }
1310 
1311 static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1312 {
1313     CPU_FloatU u1, u2;
1314 
1315     u1.l = op1;
1316     u2.l = op2;
1317     return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1318 }
1319 
1320 static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1321 {
1322     CPU_FloatU u1, u2;
1323 
1324     u1.l = op1;
1325     u2.l = op2;
1326     return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1327 }
1328 
1329 static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1330 {
1331     /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1332     return efscmplt(env, op1, op2);
1333 }
1334 
1335 static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1336 {
1337     /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1338     return efscmpgt(env, op1, op2);
1339 }
1340 
1341 static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1342 {
1343     /* XXX: TODO: ignore special values (NaN, infinities, ...) */
1344     return efscmpeq(env, op1, op2);
1345 }
1346 
1347 #define HELPER_SINGLE_SPE_CMP(name)                                     \
1348     uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
1349     {                                                                   \
1350         return e##name(env, op1, op2);                                  \
1351     }
1352 /* efststlt */
1353 HELPER_SINGLE_SPE_CMP(fststlt);
1354 /* efststgt */
1355 HELPER_SINGLE_SPE_CMP(fststgt);
1356 /* efststeq */
1357 HELPER_SINGLE_SPE_CMP(fststeq);
1358 /* efscmplt */
1359 HELPER_SINGLE_SPE_CMP(fscmplt);
1360 /* efscmpgt */
1361 HELPER_SINGLE_SPE_CMP(fscmpgt);
1362 /* efscmpeq */
1363 HELPER_SINGLE_SPE_CMP(fscmpeq);
1364 
1365 static inline uint32_t evcmp_merge(int t0, int t1)
1366 {
1367     return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1368 }
1369 
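/*
 * evcmp_merge() packs the per-element results into one CR field: bit 3 is
 * the high-element result, bit 2 the low-element result, bit 1 their OR
 * and bit 0 their AND.
 */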
1370 #define HELPER_VECTOR_SPE_CMP(name)                                     \
1371     uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
1372     {                                                                   \
1373         return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
1374                            e##name(env, op1, op2));                     \
1375     }
1376 /* evfststlt */
1377 HELPER_VECTOR_SPE_CMP(fststlt);
1378 /* evfststgt */
1379 HELPER_VECTOR_SPE_CMP(fststgt);
1380 /* evfststeq */
1381 HELPER_VECTOR_SPE_CMP(fststeq);
1382 /* evfscmplt */
1383 HELPER_VECTOR_SPE_CMP(fscmplt);
1384 /* evfscmpgt */
1385 HELPER_VECTOR_SPE_CMP(fscmpgt);
1386 /* evfscmpeq */
1387 HELPER_VECTOR_SPE_CMP(fscmpeq);
1388 
1389 /* Double-precision floating-point conversion */
1390 uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1391 {
1392     CPU_DoubleU u;
1393 
1394     u.d = int32_to_float64(val, &env->vec_status);
1395 
1396     return u.ll;
1397 }
1398 
1399 uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1400 {
1401     CPU_DoubleU u;
1402 
1403     u.d = int64_to_float64(val, &env->vec_status);
1404 
1405     return u.ll;
1406 }
1407 
1408 uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1409 {
1410     CPU_DoubleU u;
1411 
1412     u.d = uint32_to_float64(val, &env->vec_status);
1413 
1414     return u.ll;
1415 }
1416 
1417 uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1418 {
1419     CPU_DoubleU u;
1420 
1421     u.d = uint64_to_float64(val, &env->vec_status);
1422 
1423     return u.ll;
1424 }
1425 
1426 uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1427 {
1428     CPU_DoubleU u;
1429 
1430     u.ll = val;
1431     /* NaNs are not treated the way IEEE 754 specifies */
1432     if (unlikely(float64_is_any_nan(u.d))) {
1433         return 0;
1434     }
1435 
1436     return float64_to_int32(u.d, &env->vec_status);
1437 }
1438 
1439 uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1440 {
1441     CPU_DoubleU u;
1442 
1443     u.ll = val;
1444     /* NaNs are not treated the way IEEE 754 specifies */
1445     if (unlikely(float64_is_any_nan(u.d))) {
1446         return 0;
1447     }
1448 
1449     return float64_to_uint32(u.d, &env->vec_status);
1450 }
1451 
1452 uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1453 {
1454     CPU_DoubleU u;
1455 
1456     u.ll = val;
1457     /* NaNs are not treated the way IEEE 754 specifies */
1458     if (unlikely(float64_is_any_nan(u.d))) {
1459         return 0;
1460     }
1461 
1462     return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1463 }
1464 
1465 uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1466 {
1467     CPU_DoubleU u;
1468 
1469     u.ll = val;
1470     /* NaNs are not treated the way IEEE 754 specifies */
1471     if (unlikely(float64_is_any_nan(u.d))) {
1472         return 0;
1473     }
1474 
1475     return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1476 }
1477 
1478 uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1479 {
1480     CPU_DoubleU u;
1481 
1482     u.ll = val;
1483     /* NaNs are not treated the way IEEE 754 specifies */
1484     if (unlikely(float64_is_any_nan(u.d))) {
1485         return 0;
1486     }
1487 
1488     return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1489 }
1490 
1491 uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1492 {
1493     CPU_DoubleU u;
1494 
1495     u.ll = val;
1496     /* NaNs are not treated the way IEEE 754 specifies */
1497     if (unlikely(float64_is_any_nan(u.d))) {
1498         return 0;
1499     }
1500 
1501     return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1502 }
1503 
1504 uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1505 {
1506     CPU_DoubleU u;
1507     float64 tmp;
1508 
1509     u.d = int32_to_float64(val, &env->vec_status);
1510     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1511     u.d = float64_div(u.d, tmp, &env->vec_status);
1512 
1513     return u.ll;
1514 }
1515 
1516 uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1517 {
1518     CPU_DoubleU u;
1519     float64 tmp;
1520 
1521     u.d = uint32_to_float64(val, &env->vec_status);
1522     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1523     u.d = float64_div(u.d, tmp, &env->vec_status);
1524 
1525     return u.ll;
1526 }
1527 
1528 uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1529 {
1530     CPU_DoubleU u;
1531     float64 tmp;
1532 
1533     u.ll = val;
1534     /* NaNs are not treated the way IEEE 754 specifies */
1535     if (unlikely(float64_is_any_nan(u.d))) {
1536         return 0;
1537     }
1538     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1539     u.d = float64_mul(u.d, tmp, &env->vec_status);
1540 
1541     return float64_to_int32(u.d, &env->vec_status);
1542 }
1543 
1544 uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1545 {
1546     CPU_DoubleU u;
1547     float64 tmp;
1548 
1549     u.ll = val;
1550     /* NaNs are not treated the way IEEE 754 specifies */
1551     if (unlikely(float64_is_any_nan(u.d))) {
1552         return 0;
1553     }
1554     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1555     u.d = float64_mul(u.d, tmp, &env->vec_status);
1556 
1557     return float64_to_uint32(u.d, &env->vec_status);
1558 }
1559 
1560 uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1561 {
1562     CPU_DoubleU u1;
1563     CPU_FloatU u2;
1564 
1565     u1.ll = val;
1566     u2.f = float64_to_float32(u1.d, &env->vec_status);
1567 
1568     return u2.l;
1569 }
1570 
1571 uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1572 {
1573     CPU_DoubleU u2;
1574     CPU_FloatU u1;
1575 
1576     u1.l = val;
1577     u2.d = float32_to_float64(u1.f, &env->vec_status);
1578 
1579     return u2.ll;
1580 }
1581 
1582 /* Double-precision floating-point arithmetic */
1583 uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1584 {
1585     CPU_DoubleU u1, u2;
1586 
1587     u1.ll = op1;
1588     u2.ll = op2;
1589     u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1590     return u1.ll;
1591 }
1592 
1593 uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1594 {
1595     CPU_DoubleU u1, u2;
1596 
1597     u1.ll = op1;
1598     u2.ll = op2;
1599     u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1600     return u1.ll;
1601 }
1602 
1603 uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1604 {
1605     CPU_DoubleU u1, u2;
1606 
1607     u1.ll = op1;
1608     u2.ll = op2;
1609     u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1610     return u1.ll;
1611 }
1612 
1613 uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1614 {
1615     CPU_DoubleU u1, u2;
1616 
1617     u1.ll = op1;
1618     u2.ll = op2;
1619     u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1620     return u1.ll;
1621 }
1622 
1623 /* Double-precision floating-point comparisons */
1624 uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1625 {
1626     CPU_DoubleU u1, u2;
1627 
1628     u1.ll = op1;
1629     u2.ll = op2;
1630     return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1631 }
1632 
1633 uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1634 {
1635     CPU_DoubleU u1, u2;
1636 
1637     u1.ll = op1;
1638     u2.ll = op2;
1639     return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1640 }
1641 
1642 uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1643 {
1644     CPU_DoubleU u1, u2;
1645 
1646     u1.ll = op1;
1647     u2.ll = op2;
1648     return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1649 }
1650 
1651 uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1652 {
1653     /* XXX: TODO: test special values (NaN, infinities, ...) */
1654     return helper_efdtstlt(env, op1, op2);
1655 }
1656 
1657 uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1658 {
1659     /* XXX: TODO: test special values (NaN, infinities, ...) */
1660     return helper_efdtstgt(env, op1, op2);
1661 }
1662 
1663 uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1664 {
1665     /* XXX: TODO: test special values (NaN, infinities, ...) */
1666     return helper_efdtsteq(env, op1, op2);
1667 }
1668 
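/*
 * Identity "conversion" so that the float64_to_##tp construct in
 * VSX_MADD below also expands to valid code when tp is float64
 * (float64_to_float32 covers the float32 case).
 */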
1669 #define float64_to_float64(x, env) x
1670 
1671 
1672 /* VSX_ADD_SUB - VSX floating point add/subtract
1673  *   name  - instruction mnemonic
1674  *   op    - operation (add or sub)
1675  *   nels  - number of elements (1, 2 or 4)
1676  *   tp    - type (float32 or float64)
1677  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1678  *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
1679  */
1680 #define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
1681 void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
1682 {                                                                            \
1683     ppc_vsr_t xt, xa, xb;                                                    \
1684     int i;                                                                   \
1685                                                                              \
1686     getVSR(xA(opcode), &xa, env);                                            \
1687     getVSR(xB(opcode), &xb, env);                                            \
1688     getVSR(xT(opcode), &xt, env);                                            \
1689     helper_reset_fpstatus(env);                                              \
1690                                                                              \
1691     for (i = 0; i < nels; i++) {                                             \
1692         float_status tstat = env->fp_status;                                 \
1693         set_float_exception_flags(0, &tstat);                                \
1694         xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
1695         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1696                                                                              \
1697         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1698             if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
1699                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
1700             } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1701                        tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1702                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1703             }                                                                \
1704         }                                                                    \
1705                                                                              \
1706         if (r2sp) {                                                          \
1707             xt.fld = helper_frsp(env, xt.fld);                               \
1708         }                                                                    \
1709                                                                              \
1710         if (sfprf) {                                                         \
1711             helper_compute_fprf_float64(env, xt.fld);                        \
1712         }                                                                    \
1713     }                                                                        \
1714     putVSR(xT(opcode), &xt, env);                                            \
1715     float_check_status(env);                                                 \
1716 }
1717 
1718 VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
1719 VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
1720 VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
1721 VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
1722 VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
1723 VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
1724 VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
1725 VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
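
/*
 * For example, VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
 * expands to a helper whose loop body boils down to
 *
 *     xt.VsrD(0) = float64_add(xa.VsrD(0), xb.VsrD(0), &tstat);
 *     helper_compute_fprf_float64(env, xt.VsrD(0));
 *
 * plus the invalid-operation handling, while xvaddsp performs four
 * float32_add operations on VsrW(0..3) without updating FPRF.  The
 * scalar *sp forms pass r2sp=1 and therefore round their double
 * precision result back to single precision with helper_frsp.
 */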
1726 
1727 void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
1728 {
1729     ppc_vsr_t xt, xa, xb;
1730     float_status tstat;
1731 
1732     getVSR(rA(opcode) + 32, &xa, env);
1733     getVSR(rB(opcode) + 32, &xb, env);
1734     getVSR(rD(opcode) + 32, &xt, env);
1735     helper_reset_fpstatus(env);
1736 
1737     tstat = env->fp_status;
1738     if (unlikely(Rc(opcode) != 0)) {
1739         tstat.float_rounding_mode = float_round_to_odd;
1740     }
1741 
1742     set_float_exception_flags(0, &tstat);
1743     xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
1744     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1745 
1746     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1747         if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1748             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
1749         } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1750                    float128_is_signaling_nan(xb.f128, &tstat)) {
1751             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1752         }
1753     }
1754 
1755     helper_compute_fprf_float128(env, xt.f128);
1756 
1757     putVSR(rD(opcode) + 32, &xt, env);
1758     float_check_status(env);
1759 }
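
/*
 * For the quad precision scalar forms the opcode's Rc bit appears to
 * select the round-to-odd variants (xsaddqpo and friends), which is
 * why tstat temporarily switches to float_round_to_odd before the
 * float128 operation.  The same pattern repeats in xsmulqp and
 * xsdivqp below.
 */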
1760 
1761 /* VSX_MUL - VSX floating point multiply
1762  *   op    - instruction mnemonic
1763  *   nels  - number of elements (1, 2 or 4)
1764  *   tp    - type (float32 or float64)
1765  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1766  *   sfprf - set FPRF
1767  */
1768 #define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
1769 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1770 {                                                                            \
1771     ppc_vsr_t xt, xa, xb;                                                    \
1772     int i;                                                                   \
1773                                                                              \
1774     getVSR(xA(opcode), &xa, env);                                            \
1775     getVSR(xB(opcode), &xb, env);                                            \
1776     getVSR(xT(opcode), &xt, env);                                            \
1777     helper_reset_fpstatus(env);                                              \
1778                                                                              \
1779     for (i = 0; i < nels; i++) {                                             \
1780         float_status tstat = env->fp_status;                                 \
1781         set_float_exception_flags(0, &tstat);                                \
1782         xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
1783         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
1784                                                                              \
1785         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
1786             if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
1787                 (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
1788                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
1789             } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
1790                        tp##_is_signaling_nan(xb.fld, &tstat)) {              \
1791                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
1792             }                                                                \
1793         }                                                                    \
1794                                                                              \
1795         if (r2sp) {                                                          \
1796             xt.fld = helper_frsp(env, xt.fld);                               \
1797         }                                                                    \
1798                                                                              \
1799         if (sfprf) {                                                         \
1800             helper_compute_fprf_float64(env, xt.fld);                        \
1801         }                                                                    \
1802     }                                                                        \
1803                                                                              \
1804     putVSR(xT(opcode), &xt, env);                                            \
1805     float_check_status(env);                                                 \
1806 }
1807 
1808 VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
1809 VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
1810 VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
1811 VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1812 
1813 void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
1814 {
1815     ppc_vsr_t xt, xa, xb;
1816     float_status tstat;
1817 
1818     getVSR(rA(opcode) + 32, &xa, env);
1819     getVSR(rB(opcode) + 32, &xb, env);
1820     getVSR(rD(opcode) + 32, &xt, env);
1821 
1822     helper_reset_fpstatus(env);
1823     tstat = env->fp_status;
1824     if (unlikely(Rc(opcode) != 0)) {
1825         tstat.float_rounding_mode = float_round_to_odd;
1826     }
1827 
1828     set_float_exception_flags(0, &tstat);
1829     xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
1830     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1831 
1832     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1833         if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
1834             (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
1835             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
1836         } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1837                    float128_is_signaling_nan(xb.f128, &tstat)) {
1838             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1839         }
1840     }
1841     helper_compute_fprf_float128(env, xt.f128);
1842 
1843     putVSR(rD(opcode) + 32, &xt, env);
1844     float_check_status(env);
1845 }
1846 
1847 /* VSX_DIV - VSX floating point divide
1848  *   op    - instruction mnemonic
1849  *   nels  - number of elements (1, 2 or 4)
1850  *   tp    - type (float32 or float64)
1851  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1852  *   sfprf - set FPRF
1853  */
1854 #define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
1855 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1856 {                                                                             \
1857     ppc_vsr_t xt, xa, xb;                                                     \
1858     int i;                                                                    \
1859                                                                               \
1860     getVSR(xA(opcode), &xa, env);                                             \
1861     getVSR(xB(opcode), &xb, env);                                             \
1862     getVSR(xT(opcode), &xt, env);                                             \
1863     helper_reset_fpstatus(env);                                               \
1864                                                                               \
1865     for (i = 0; i < nels; i++) {                                              \
1866         float_status tstat = env->fp_status;                                  \
1867         set_float_exception_flags(0, &tstat);                                 \
1868         xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
1869         env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
1870                                                                               \
1871         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
1872             if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
1873                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
1874             } else if (tp##_is_zero(xa.fld) &&                                \
1875                 tp##_is_zero(xb.fld)) {                                       \
1876                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
1877             } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||               \
1878                 tp##_is_signaling_nan(xb.fld, &tstat)) {                      \
1879                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1880             }                                                                 \
1881         }                                                                     \
1882                                                                               \
1883         if (r2sp) {                                                           \
1884             xt.fld = helper_frsp(env, xt.fld);                                \
1885         }                                                                     \
1886                                                                               \
1887         if (sfprf) {                                                          \
1888             helper_compute_fprf_float64(env, xt.fld);                         \
1889         }                                                                     \
1890     }                                                                         \
1891                                                                               \
1892     putVSR(xT(opcode), &xt, env);                                             \
1893     float_check_status(env);                                                  \
1894 }
1895 
1896 VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
1897 VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
1898 VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
1899 VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1900 
1901 void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
1902 {
1903     ppc_vsr_t xt, xa, xb;
1904     float_status tstat;
1905 
1906     getVSR(rA(opcode) + 32, &xa, env);
1907     getVSR(rB(opcode) + 32, &xb, env);
1908     getVSR(rD(opcode) + 32, &xt, env);
1909 
1910     helper_reset_fpstatus(env);
1911     tstat = env->fp_status;
1912     if (unlikely(Rc(opcode) != 0)) {
1913         tstat.float_rounding_mode = float_round_to_odd;
1914     }
1915 
1916     set_float_exception_flags(0, &tstat);
1917     xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
1918     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1919 
1920     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1921         if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1922             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
1923         } else if (float128_is_zero(xa.f128) &&
1924             float128_is_zero(xb.f128)) {
1925             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
1926         } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1927             float128_is_signaling_nan(xb.f128, &tstat)) {
1928             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1929         }
1930     }
1931 
1932     helper_compute_fprf_float128(env, xt.f128);
1933     putVSR(rD(opcode) + 32, &xt, env);
1934     float_check_status(env);
1935 }
1936 
1937 /* VSX_RE  - VSX floating point reciprocal estimate
1938  *   op    - instruction mnemonic
1939  *   nels  - number of elements (1, 2 or 4)
1940  *   tp    - type (float32 or float64)
1941  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1942  *   sfprf - set FPRF
1943  */
1944 #define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
1945 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
1946 {                                                                             \
1947     ppc_vsr_t xt, xb;                                                         \
1948     int i;                                                                    \
1949                                                                               \
1950     getVSR(xB(opcode), &xb, env);                                             \
1951     getVSR(xT(opcode), &xt, env);                                             \
1952     helper_reset_fpstatus(env);                                               \
1953                                                                               \
1954     for (i = 0; i < nels; i++) {                                              \
1955         if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
1956                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
1957         }                                                                     \
1958         xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
1959                                                                               \
1960         if (r2sp) {                                                           \
1961             xt.fld = helper_frsp(env, xt.fld);                                \
1962         }                                                                     \
1963                                                                               \
1964         if (sfprf) {                                                          \
1965             helper_compute_fprf_float64(env, xt.fld);                         \
1966         }                                                                     \
1967     }                                                                         \
1968                                                                               \
1969     putVSR(xT(opcode), &xt, env);                                             \
1970     float_check_status(env);                                                  \
1971 }
1972 
1973 VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
1974 VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
1975 VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
1976 VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
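
/*
 * Note that the "estimate" is produced here as a full precision 1/x
 * division (and, for VSX_RSQRTE below, as 1/sqrt(x)), so the result is
 * at least as accurate as the limited-precision estimate real hardware
 * is permitted to return.
 */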
1977 
1978 /* VSX_SQRT - VSX floating point square root
1979  *   op    - instruction mnemonic
1980  *   nels  - number of elements (1, 2 or 4)
1981  *   tp    - type (float32 or float64)
1982  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
1983  *   sfprf - set FPRF
1984  */
1985 #define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
1986 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
1987 {                                                                            \
1988     ppc_vsr_t xt, xb;                                                        \
1989     int i;                                                                   \
1990                                                                              \
1991     getVSR(xB(opcode), &xb, env);                                            \
1992     getVSR(xT(opcode), &xt, env);                                            \
1993     helper_reset_fpstatus(env);                                              \
1994                                                                              \
1995     for (i = 0; i < nels; i++) {                                             \
1996         float_status tstat = env->fp_status;                                 \
1997         set_float_exception_flags(0, &tstat);                                \
1998         xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
1999         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2000                                                                              \
2001         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2002             if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2003                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2004             } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2005                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2006             }                                                                \
2007         }                                                                    \
2008                                                                              \
2009         if (r2sp) {                                                          \
2010             xt.fld = helper_frsp(env, xt.fld);                               \
2011         }                                                                    \
2012                                                                              \
2013         if (sfprf) {                                                         \
2014             helper_compute_fprf_float64(env, xt.fld);                        \
2015         }                                                                    \
2016     }                                                                        \
2017                                                                              \
2018     putVSR(xT(opcode), &xt, env);                                            \
2019     float_check_status(env);                                                 \
2020 }
2021 
2022 VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
2023 VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
2024 VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
2025 VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2026 
2027 /* VSX_RSQRTE - VSX floating point reciprocal square root estimate
2028  *   op    - instruction mnemonic
2029  *   nels  - number of elements (1, 2 or 4)
2030  *   tp    - type (float32 or float64)
2031  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2032  *   sfprf - set FPRF
2033  */
2034 #define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
2035 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2036 {                                                                            \
2037     ppc_vsr_t xt, xb;                                                        \
2038     int i;                                                                   \
2039                                                                              \
2040     getVSR(xB(opcode), &xb, env);                                            \
2041     getVSR(xT(opcode), &xt, env);                                            \
2042     helper_reset_fpstatus(env);                                              \
2043                                                                              \
2044     for (i = 0; i < nels; i++) {                                             \
2045         float_status tstat = env->fp_status;                                 \
2046         set_float_exception_flags(0, &tstat);                                \
2047         xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
2048         xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
2049         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
2050                                                                              \
2051         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
2052             if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
2053                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
2054             } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
2055                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
2056             }                                                                \
2057         }                                                                    \
2058                                                                              \
2059         if (r2sp) {                                                          \
2060             xt.fld = helper_frsp(env, xt.fld);                               \
2061         }                                                                    \
2062                                                                              \
2063         if (sfprf) {                                                         \
2064             helper_compute_fprf_float64(env, xt.fld);                        \
2065         }                                                                    \
2066     }                                                                        \
2067                                                                              \
2068     putVSR(xT(opcode), &xt, env);                                            \
2069     float_check_status(env);                                                 \
2070 }
2071 
2072 VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
2073 VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
2074 VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
2075 VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2076 
2077 /* VSX_TDIV - VSX floating point test for divide
2078  *   op    - instruction mnemonic
2079  *   nels  - number of elements (1, 2 or 4)
2080  *   tp    - type (float32 or float64)
2081  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2082  *   emin  - minimum unbiased exponent
2083  *   emax  - maximum unbiased exponent
2084  *   nbits - number of fraction bits
2085  */
2086 #define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
2087 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2088 {                                                                       \
2089     ppc_vsr_t xa, xb;                                                   \
2090     int i;                                                              \
2091     int fe_flag = 0;                                                    \
2092     int fg_flag = 0;                                                    \
2093                                                                         \
2094     getVSR(xA(opcode), &xa, env);                                       \
2095     getVSR(xB(opcode), &xb, env);                                       \
2096                                                                         \
2097     for (i = 0; i < nels; i++) {                                        \
2098         if (unlikely(tp##_is_infinity(xa.fld) ||                        \
2099                      tp##_is_infinity(xb.fld) ||                        \
2100                      tp##_is_zero(xb.fld))) {                           \
2101             fe_flag = 1;                                                \
2102             fg_flag = 1;                                                \
2103         } else {                                                        \
2104             int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
2105             int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2106                                                                         \
2107             if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
2108                          tp##_is_any_nan(xb.fld))) {                    \
2109                 fe_flag = 1;                                            \
2110             } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
2111                 fe_flag = 1;                                            \
2112             } else if (!tp##_is_zero(xa.fld) &&                         \
2113                        (((e_a - e_b) >= emax) ||                        \
2114                         ((e_a - e_b) <= (emin+1)) ||                    \
2115                          (e_a <= (emin+nbits)))) {                      \
2116                 fe_flag = 1;                                            \
2117             }                                                           \
2118                                                                         \
2119             if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2120                 /* XB is not zero because of the above check and */     \
2121                 /* so must be denormalized.                      */     \
2122                 fg_flag = 1;                                            \
2123             }                                                           \
2124         }                                                               \
2125     }                                                                   \
2126                                                                         \
2127     env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2128 }
2129 
2130 VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
2131 VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2132 VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2133 
2134 /* VSX_TSQRT - VSX floating point test for square root
2135  *   op    - instruction mnemonic
2136  *   nels  - number of elements (1, 2 or 4)
2137  *   tp    - type (float32 or float64)
2138  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2139  *   emin  - minimum unbiased exponent
2141  *   nbits - number of fraction bits
2142  */
2143 #define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
2144 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2145 {                                                                       \
2146     ppc_vsr_t xa, xb;                                                   \
2147     int i;                                                              \
2148     int fe_flag = 0;                                                    \
2149     int fg_flag = 0;                                                    \
2150                                                                         \
2151     getVSR(xA(opcode), &xa, env);                                       \
2152     getVSR(xB(opcode), &xb, env);                                       \
2153                                                                         \
2154     for (i = 0; i < nels; i++) {                                        \
2155         if (unlikely(tp##_is_infinity(xb.fld) ||                        \
2156                      tp##_is_zero(xb.fld))) {                           \
2157             fe_flag = 1;                                                \
2158             fg_flag = 1;                                                \
2159         } else {                                                        \
2160             int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
2161                                                                         \
2162             if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
2163                 fe_flag = 1;                                            \
2164             } else if (unlikely(tp##_is_zero(xb.fld))) {                \
2165                 fe_flag = 1;                                            \
2166             } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
2167                 fe_flag = 1;                                            \
2168             } else if (!tp##_is_zero(xb.fld) &&                         \
2169                       (e_b <= (emin+nbits))) {                          \
2170                 fe_flag = 1;                                            \
2171             }                                                           \
2172                                                                         \
2173             if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
2174                 /* XB is not zero because of the above check and */     \
2175                 /* therefore must be denormalized.               */     \
2176                 fg_flag = 1;                                            \
2177             }                                                           \
2178         }                                                               \
2179     }                                                                   \
2180                                                                         \
2181     env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
2182 }
2183 
2184 VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2185 VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
2186 VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2187 
2188 /* VSX_MADD - VSX floating point multiply/add variations
2189  *   op    - instruction mnemonic
2190  *   nels  - number of elements (1, 2 or 4)
2191  *   tp    - type (float32 or float64)
2192  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2193  *   maddflgs - flags for the float*muladd routine that control the
2194  *           various forms (madd, msub, nmadd, nmsub)
2195  *   afrm  - A form (1=A, 0=M)
2196  *   sfprf - set FPRF
 *   r2sp  - round the result to single precision
2197  */
2198 #define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
2199 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2200 {                                                                             \
2201     ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
2202     ppc_vsr_t *b, *c;                                                         \
2203     int i;                                                                    \
2204                                                                               \
2205     if (afrm) { /* AxB + T */                                                 \
2206         b = &xb;                                                              \
2207         c = &xt_in;                                                           \
2208     } else { /* AxT + B */                                                    \
2209         b = &xt_in;                                                           \
2210         c = &xb;                                                              \
2211     }                                                                         \
2212                                                                               \
2213     getVSR(xA(opcode), &xa, env);                                             \
2214     getVSR(xB(opcode), &xb, env);                                             \
2215     getVSR(xT(opcode), &xt_in, env);                                          \
2216                                                                               \
2217     xt_out = xt_in;                                                           \
2218                                                                               \
2219     helper_reset_fpstatus(env);                                               \
2220                                                                               \
2221     for (i = 0; i < nels; i++) {                                              \
2222         float_status tstat = env->fp_status;                                  \
2223         set_float_exception_flags(0, &tstat);                                 \
2224         if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2225             /* Avoid double rounding errors by rounding the intermediate */   \
2226             /* result to odd.                                            */   \
2227             set_float_rounding_mode(float_round_to_zero, &tstat);             \
2228             xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2229                                        maddflgs, &tstat);                     \
2230             xt_out.fld |= (get_float_exception_flags(&tstat) &                \
2231                               float_flag_inexact) != 0;                       \
2232         } else {                                                              \
2233             xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2234                                         maddflgs, &tstat);                    \
2235         }                                                                     \
2236         env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2237                                                                               \
2238         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2239             if (tp##_is_signaling_nan(xa.fld, &tstat) ||                      \
2240                 tp##_is_signaling_nan(b->fld, &tstat) ||                      \
2241                 tp##_is_signaling_nan(c->fld, &tstat)) {                      \
2242                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
2243                 tstat.float_exception_flags &= ~float_flag_invalid;           \
2244             }                                                                 \
2245             if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) ||         \
2246                 (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) {         \
2247                 xt_out.fld = float64_to_##tp(float_invalid_op_excp(env,       \
2248                     POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);          \
2249                 tstat.float_exception_flags &= ~float_flag_invalid;           \
2250             }                                                                 \
2251             if ((tstat.float_exception_flags & float_flag_invalid) &&         \
2252                 ((tp##_is_infinity(xa.fld) ||                                 \
2253                   tp##_is_infinity(b->fld)) &&                                \
2254                   tp##_is_infinity(c->fld))) {                                \
2255                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);     \
2256             }                                                                 \
2257         }                                                                     \
2258                                                                               \
2259         if (r2sp) {                                                           \
2260             xt_out.fld = helper_frsp(env, xt_out.fld);                        \
2261         }                                                                     \
2262                                                                               \
2263         if (sfprf) {                                                          \
2264             helper_compute_fprf_float64(env, xt_out.fld);                     \
2265         }                                                                     \
2266     }                                                                         \
2267     putVSR(xT(opcode), &xt_out, env);                                         \
2268     float_check_status(env);                                                  \
2269 }
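
/*
 * In the r2sp case above, the "round intermediate result to odd" step
 * is implemented by hand: the fused multiply-add is evaluated with
 * float_round_to_zero and, if it was inexact, the least significant
 * bit of the truncated result is forced to 1.  Rounding that value to
 * single precision afterwards can then no longer be affected by double
 * rounding.  This is done only when the active rounding mode is
 * round-to-nearest-even, where double rounding could otherwise alter
 * the result.
 */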
2270 
2271 VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
2272 VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
2273 VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
2274 VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
2275 VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
2276 VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
2277 VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
2278 VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
2279 
2280 VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
2281 VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
2282 VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
2283 VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
2284 VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
2285 VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
2286 VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
2287 VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
2288 
2289 VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
2290 VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
2291 VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
2292 VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
2293 VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
2294 VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
2295 VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
2296 VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
2297 
2298 VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
2299 VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
2300 VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
2301 VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
2302 VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
2303 VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
2304 VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
2305 VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
2306 
2307 /* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
2308  *   op    - instruction mnemonic
2309  *   cmp   - comparison operation
2310  *   exp   - expected result of comparison
2311  *   svxvc - set VXVC bit
2312  */
2313 #define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
2314 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2315 {                                                                             \
2316     ppc_vsr_t xt, xa, xb;                                                     \
2317     bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
2318                                                                               \
2319     getVSR(xA(opcode), &xa, env);                                             \
2320     getVSR(xB(opcode), &xb, env);                                             \
2321     getVSR(xT(opcode), &xt, env);                                             \
2322                                                                               \
2323     if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
2324         float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
2325         vxsnan_flag = true;                                                   \
2326         if (fpscr_ve == 0 && svxvc) {                                         \
2327             vxvc_flag = true;                                                 \
2328         }                                                                     \
2329     } else if (svxvc) {                                                       \
2330         vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2331             float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
2332     }                                                                         \
2333     if (vxsnan_flag) {                                                        \
2334         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2335     }                                                                         \
2336     if (vxvc_flag) {                                                          \
2337         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
2338     }                                                                         \
2339     vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
2340                                                                               \
2341     if (!vex_flag) {                                                          \
2342         if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
2343             xt.VsrD(0) = -1;                                                  \
2344             xt.VsrD(1) = 0;                                                   \
2345         } else {                                                              \
2346             xt.VsrD(0) = 0;                                                   \
2347             xt.VsrD(1) = 0;                                                   \
2348         }                                                                     \
2349     }                                                                         \
2350     putVSR(xT(opcode), &xt, env);                                             \
2351     helper_float_check_status(env);                                           \
2352 }
2353 
2354 VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
2355 VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
2356 VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
2357 VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
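
/*
 * The comparisons above are evaluated with the operands swapped
 * (float64_##cmp(xb, xa)), so xscmpgedp tests le(xb, xa) == 1 and
 * xscmpgtdp tests lt(xb, xa) == 1, while xscmpnedp tests
 * eq(xb, xa) == 0.  A true predicate writes all ones into doubleword 0
 * of the target register, a false one writes zero.
 */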
2358 
2359 void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
2360 {
2361     ppc_vsr_t xa, xb;
2362     int64_t exp_a, exp_b;
2363     uint32_t cc;
2364 
2365     getVSR(xA(opcode), &xa, env);
2366     getVSR(xB(opcode), &xb, env);
2367 
2368     exp_a = extract64(xa.VsrD(0), 52, 11);
2369     exp_b = extract64(xb.VsrD(0), 52, 11);
2370 
2371     if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
2372                  float64_is_any_nan(xb.VsrD(0)))) {
2373         cc = CRF_SO;
2374     } else {
2375         if (exp_a < exp_b) {
2376             cc = CRF_LT;
2377         } else if (exp_a > exp_b) {
2378             cc = CRF_GT;
2379         } else {
2380             cc = CRF_EQ;
2381         }
2382     }
2383 
2384     env->fpscr &= ~(0x0F << FPSCR_FPRF);
2385     env->fpscr |= cc << FPSCR_FPRF;
2386     env->crf[BF(opcode)] = cc;
2387 
2388     helper_float_check_status(env);
2389 }
2390 
2391 void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
2392 {
2393     ppc_vsr_t xa, xb;
2394     int64_t exp_a, exp_b;
2395     uint32_t cc;
2396 
2397     getVSR(rA(opcode) + 32, &xa, env);
2398     getVSR(rB(opcode) + 32, &xb, env);
2399 
2400     exp_a = extract64(xa.VsrD(0), 48, 15);
2401     exp_b = extract64(xb.VsrD(0), 48, 15);
2402 
2403     if (unlikely(float128_is_any_nan(xa.f128) ||
2404                  float128_is_any_nan(xb.f128))) {
2405         cc = CRF_SO;
2406     } else {
2407         if (exp_a < exp_b) {
2408             cc = CRF_LT;
2409         } else if (exp_a > exp_b) {
2410             cc = CRF_GT;
2411         } else {
2412             cc = CRF_EQ;
2413         }
2414     }
2415 
2416     env->fpscr &= ~(0x0F << FPSCR_FPRF);
2417     env->fpscr |= cc << FPSCR_FPRF;
2418     env->crf[BF(opcode)] = cc;
2419 
2420     helper_float_check_status(env);
2421 }
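
/*
 * Both exponent compares look only at the biased exponent fields:
 * bits 52..62 of the double precision image for xscmpexpdp and
 * bits 48..62 of the most significant doubleword for xscmpexpqp.
 * A NaN operand makes the result unordered (CRF_SO), and the condition
 * code is copied into both the FPRF field of the FPSCR and CR[BF].
 */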
2422 
2423 #define VSX_SCALAR_CMP(op, ordered)                                      \
2424 void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
2425 {                                                                        \
2426     ppc_vsr_t xa, xb;                                                    \
2427     uint32_t cc = 0;                                                     \
2428     bool vxsnan_flag = false, vxvc_flag = false;                         \
2429                                                                          \
2430     helper_reset_fpstatus(env);                                          \
2431     getVSR(xA(opcode), &xa, env);                                        \
2432     getVSR(xB(opcode), &xb, env);                                        \
2433                                                                          \
2434     if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
2435         float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
2436         vxsnan_flag = true;                                              \
2437         cc = CRF_SO;                                                     \
2438         if (fpscr_ve == 0 && ordered) {                                  \
2439             vxvc_flag = true;                                            \
2440         }                                                                \
2441     } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
2442                float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
2443         cc = CRF_SO;                                                     \
2444         if (ordered) {                                                   \
2445             vxvc_flag = true;                                            \
2446         }                                                                \
2447     }                                                                    \
2448     if (vxsnan_flag) {                                                   \
2449         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2450     }                                                                    \
2451     if (vxvc_flag) {                                                     \
2452         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
2453     }                                                                    \
2454                                                                          \
2455     if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
2456         cc |= CRF_LT;                                                    \
2457     } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
2458         cc |= CRF_GT;                                                    \
2459     } else {                                                             \
2460         cc |= CRF_EQ;                                                    \
2461     }                                                                    \
2462                                                                          \
2463     env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
2464     env->fpscr |= cc << FPSCR_FPRF;                                      \
2465     env->crf[BF(opcode)] = cc;                                           \
2466                                                                          \
2467     float_check_status(env);                                             \
2468 }
2469 
2470 VSX_SCALAR_CMP(xscmpodp, 1)
2471 VSX_SCALAR_CMP(xscmpudp, 0)
2472 
2473 #define VSX_SCALAR_CMPQ(op, ordered)                                    \
2474 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2475 {                                                                       \
2476     ppc_vsr_t xa, xb;                                                   \
2477     uint32_t cc = 0;                                                    \
2478     bool vxsnan_flag = false, vxvc_flag = false;                        \
2479                                                                         \
2480     helper_reset_fpstatus(env);                                         \
2481     getVSR(rA(opcode) + 32, &xa, env);                                  \
2482     getVSR(rB(opcode) + 32, &xb, env);                                  \
2483                                                                         \
2484     if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
2485         float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
2486         vxsnan_flag = true;                                             \
2487         cc = CRF_SO;                                                    \
2488         if (fpscr_ve == 0 && ordered) {                                 \
2489             vxvc_flag = true;                                           \
2490         }                                                               \
2491     } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
2492                float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
2493         cc = CRF_SO;                                                    \
2494         if (ordered) {                                                  \
2495             vxvc_flag = true;                                           \
2496         }                                                               \
2497     }                                                                   \
2498     if (vxsnan_flag) {                                                  \
2499         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);          \
2500     }                                                                   \
2501     if (vxvc_flag) {                                                    \
2502         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);            \
2503     }                                                                   \
2504                                                                         \
2505     if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
2506         cc |= CRF_LT;                                                   \
2507     } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
2508         cc |= CRF_GT;                                                   \
2509     } else {                                                            \
2510         cc |= CRF_EQ;                                                   \
2511     }                                                                   \
2512                                                                         \
2513     env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
2514     env->fpscr |= cc << FPSCR_FPRF;                                     \
2515     env->crf[BF(opcode)] = cc;                                          \
2516                                                                         \
2517     float_check_status(env);                                            \
2518 }
2519 
2520 VSX_SCALAR_CMPQ(xscmpoqp, 1)
2521 VSX_SCALAR_CMPQ(xscmpuqp, 0)
2522 
2523 /* VSX_MAX_MIN - VSX floating point maximum/minimum
2524  *   name  - instruction mnemonic
2525  *   op    - operation (max or min)
2526  *   nels  - number of elements (1, 2 or 4)
2527  *   tp    - type (float32 or float64)
2528  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2529  */
2530 #define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
2531 void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2532 {                                                                             \
2533     ppc_vsr_t xt, xa, xb;                                                     \
2534     int i;                                                                    \
2535                                                                               \
2536     getVSR(xA(opcode), &xa, env);                                             \
2537     getVSR(xB(opcode), &xb, env);                                             \
2538     getVSR(xT(opcode), &xt, env);                                             \
2539                                                                               \
2540     for (i = 0; i < nels; i++) {                                              \
2541         xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
2542         if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
2543                      tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
2544             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
2545         }                                                                     \
2546     }                                                                         \
2547                                                                               \
2548     putVSR(xT(opcode), &xt, env);                                             \
2549     float_check_status(env);                                                  \
2550 }
2551 
2552 VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
2553 VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
2554 VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
2555 VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
2556 VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
2557 VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
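
/*
 * As a rough illustration, VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
 * expands to a helper whose core is:
 *
 *     xt.VsrD(0) = float64_maxnum(xa.VsrD(0), xb.VsrD(0), &env->fp_status);
 *
 * i.e. softfloat's maxnum/minnum (IEEE 754-2008 maxNum/minNum) semantics,
 * plus a VXSNAN exception if either input is a signaling NaN.
 */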
2558 
2559 #define VSX_MAX_MINC(name, max)                                               \
2560 void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2561 {                                                                             \
2562     ppc_vsr_t xt, xa, xb;                                                     \
2563     bool vxsnan_flag = false, vex_flag = false;                               \
2564                                                                               \
2565     getVSR(rA(opcode) + 32, &xa, env);                                        \
2566     getVSR(rB(opcode) + 32, &xb, env);                                        \
2567     getVSR(rD(opcode) + 32, &xt, env);                                        \
2568                                                                               \
2569     if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
2570                  float64_is_any_nan(xb.VsrD(0)))) {                           \
2571         if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
2572             float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2573             vxsnan_flag = true;                                               \
2574         }                                                                     \
2575         xt.VsrD(0) = xb.VsrD(0);                                              \
2576     } else if ((max &&                                                        \
2577                !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2578                (!max &&                                                       \
2579                float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2580         xt.VsrD(0) = xa.VsrD(0);                                              \
2581     } else {                                                                  \
2582         xt.VsrD(0) = xb.VsrD(0);                                              \
2583     }                                                                         \
2584                                                                               \
2585     vex_flag = fpscr_ve & vxsnan_flag;                                        \
2586     if (vxsnan_flag) {                                                        \
2587         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2588     }                                                                         \
2589     if (!vex_flag) {                                                          \
2590         putVSR(rD(opcode) + 32, &xt, env);                                    \
2591     }                                                                         \
2592 }
2593 
2594 VSX_MAX_MINC(xsmaxcdp, 1)
2595 VSX_MAX_MINC(xsmincdp, 0)
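
/*
 * Note on the "C" forms above: xsmaxcdp/xsmincdp effectively implement the
 * C conditional (a > b ? a : b and a < b ? a : b), so a NaN in either
 * operand simply selects operand B.  If the signaling-NaN case traps
 * (FPSCR[VE] set), the target VSR is left unmodified.
 */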
2596 
2597 #define VSX_MAX_MINJ(name, max)                                               \
2598 void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2599 {                                                                             \
2600     ppc_vsr_t xt, xa, xb;                                                     \
2601     bool vxsnan_flag = false, vex_flag = false;                               \
2602                                                                               \
2603     getVSR(rA(opcode) + 32, &xa, env);                                        \
2604     getVSR(rB(opcode) + 32, &xb, env);                                        \
2605     getVSR(rD(opcode) + 32, &xt, env);                                        \
2606                                                                               \
2607     if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
2608         if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
2609             vxsnan_flag = true;                                               \
2610         }                                                                     \
2611         xt.VsrD(0) = xa.VsrD(0);                                              \
2612     } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
2613         if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2614             vxsnan_flag = true;                                               \
2615         }                                                                     \
2616         xt.VsrD(0) = xb.VsrD(0);                                              \
2617     } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
2618         if (max) {                                                            \
2619             if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
2620                 xt.VsrD(0) = 0ULL;                                            \
2621             } else {                                                          \
2622                 xt.VsrD(0) = 0x8000000000000000ULL;                           \
2623             }                                                                 \
2624         } else {                                                              \
2625             if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
2626                 xt.VsrD(0) = 0x8000000000000000ULL;                           \
2627             } else {                                                          \
2628                 xt.VsrD(0) = 0ULL;                                            \
2629             }                                                                 \
2630         }                                                                     \
2631     } else if ((max &&                                                        \
2632                !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2633                (!max &&                                                       \
2634                float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2635         xt.VsrD(0) = xa.VsrD(0);                                              \
2636     } else {                                                                  \
2637         xt.VsrD(0) = xb.VsrD(0);                                              \
2638     }                                                                         \
2639                                                                               \
2640     vex_flag = fpscr_ve & vxsnan_flag;                                        \
2641     if (vxsnan_flag) {                                                        \
2642         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
2643     }                                                                         \
2644     if (!vex_flag) {                                                          \
2645         putVSR(rD(opcode) + 32, &xt, env);                                    \
2646     }                                                                         \
2647 }
2648 
2649 VSX_MAX_MINJ(xsmaxjdp, 1)
2650 VSX_MAX_MINJ(xsminjdp, 0)
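
/*
 * The "J" forms above differ from the "C" forms mainly in NaN and zero
 * handling: a NaN in VSR[A] is propagated in preference to one in VSR[B],
 * and when both inputs are zeros the sign is resolved explicitly --
 * xsmaxjdp returns +0 if either input is +0, xsminjdp returns -0 if
 * either input is -0.
 */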
2651 
2652 /* VSX_CMP - VSX floating point compare
2653  *   op    - instruction mnemonic
2654  *   nels  - number of elements (1, 2 or 4)
2655  *   tp    - type (float32 or float64)
2656  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2657  *   cmp   - comparison operation
2658  *   svxvc - set VXVC bit
2659  *   exp   - expected result of comparison
2660  */
2661 #define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
2662 void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2663 {                                                                         \
2664     ppc_vsr_t xt, xa, xb;                                                 \
2665     int i;                                                                \
2666     int all_true = 1;                                                     \
2667     int all_false = 1;                                                    \
2668                                                                           \
2669     getVSR(xA(opcode), &xa, env);                                         \
2670     getVSR(xB(opcode), &xb, env);                                         \
2671     getVSR(xT(opcode), &xt, env);                                         \
2672                                                                           \
2673     for (i = 0; i < nels; i++) {                                          \
2674         if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
2675                      tp##_is_any_nan(xb.fld))) {                          \
2676             if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
2677                 tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
2678                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
2679             }                                                             \
2680             if (svxvc) {                                                  \
2681                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
2682             }                                                             \
2683             xt.fld = 0;                                                   \
2684             all_true = 0;                                                 \
2685         } else {                                                          \
2686             if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
2687                 xt.fld = -1;                                              \
2688                 all_false = 0;                                            \
2689             } else {                                                      \
2690                 xt.fld = 0;                                               \
2691                 all_true = 0;                                             \
2692             }                                                             \
2693         }                                                                 \
2694     }                                                                     \
2695                                                                           \
2696     putVSR(xT(opcode), &xt, env);                                         \
2697     if ((opcode >> (31-21)) & 1) {                                        \
2698         env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
2699     }                                                                     \
2700     float_check_status(env);                                              \
2701 }
2702 
2703 VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
2704 VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
2705 VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
2706 VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
2707 VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
2708 VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
2709 VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
2710 VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
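
/*
 * Reading the expansions above: the greater/greater-equal predicates are
 * implemented by swapping the operands, e.g. xvcmpgedp tests
 * float64_le(xb.fld, xa.fld) == 1, and the not-equal forms reuse eq with
 * an expected result of 0.  In the record ('.') form, selected by the bit
 * tested with (opcode >> (31 - 21)) & 1, CR6 is set to 0b1000 when every
 * element matched and to 0b0010 when none did.
 */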
2711 
2712 /* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
2713  *   op    - instruction mnemonic
2714  *   nels  - number of elements (1, 2 or 4)
2715  *   stp   - source type (float32 or float64)
2716  *   ttp   - target type (float32 or float64)
2717  *   sfld  - source vsr_t field
2718  *   tfld  - target vsr_t field (VsrD(*) or VsrW(*))
2719  *   sfprf - set FPRF
2720  */
2721 #define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2722 void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2723 {                                                                  \
2724     ppc_vsr_t xt, xb;                                              \
2725     int i;                                                         \
2726                                                                    \
2727     getVSR(xB(opcode), &xb, env);                                  \
2728     getVSR(xT(opcode), &xt, env);                                  \
2729                                                                    \
2730     for (i = 0; i < nels; i++) {                                   \
2731         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
2732         if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2733                                             &env->fp_status))) {   \
2734             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2735             xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2736         }                                                          \
2737         if (sfprf) {                                               \
2738             helper_compute_fprf_##ttp(env, xt.tfld);               \
2739         }                                                          \
2740     }                                                              \
2741                                                                    \
2742     putVSR(xT(opcode), &xt, env);                                  \
2743     float_check_status(env);                                       \
2744 }
2745 
2746 VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
2747 VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2748 VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
2749 VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
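
/*
 * For illustration, VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32,
 * VsrD(0), VsrW(0), 1) expands to roughly:
 *
 *     xt.VsrW(0) = float64_to_float32(xb.VsrD(0), &env->fp_status);
 *
 * with a signaling NaN input raising VXSNAN and being quietened in the
 * result, and FPRF updated because sfprf is set.
 */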
2750 
2751 /* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
2752  *   op    - instruction mnemonic
2753  *   nels  - number of elements (1, 2 or 4)
2754  *   stp   - source type
2755  *   ttp   - target type
2756  *   sfld  - source vsr_t field
2757  *   tfld  - target vsr_t field
2758  *   sfprf - set FPRF
2759  */
2760 #define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
2761 void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
2762 {                                                                       \
2763     ppc_vsr_t xt, xb;                                                   \
2764     int i;                                                              \
2765                                                                         \
2766     getVSR(rB(opcode) + 32, &xb, env);                                  \
2767     getVSR(rD(opcode) + 32, &xt, env);                                  \
2768                                                                         \
2769     for (i = 0; i < nels; i++) {                                        \
2770         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2771         if (unlikely(stp##_is_signaling_nan(xb.sfld,                    \
2772                                             &env->fp_status))) {        \
2773             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);      \
2774             xt.tfld = ttp##_snan_to_qnan(xt.tfld);                      \
2775         }                                                               \
2776         if (sfprf) {                                                    \
2777             helper_compute_fprf_##ttp(env, xt.tfld);                    \
2778         }                                                               \
2779     }                                                                   \
2780                                                                         \
2781     putVSR(rD(opcode) + 32, &xt, env);                                  \
2782     float_check_status(env);                                            \
2783 }
2784 
2785 VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2786 
2787 /* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
2788  *                       involving one half precision value
2789  *   op    - instruction mnemonic
2790  *   nels  - number of elements (1, 2 or 4)
2791  *   stp   - source type
2792  *   ttp   - target type
2793  *   sfld  - source vsr_t field
2794  *   tfld  - target vsr_t field
2795  *   sfprf - set FPRF
2796  */
2797 #define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
2798 void helper_##op(CPUPPCState *env, uint32_t opcode)                \
2799 {                                                                  \
2800     ppc_vsr_t xt, xb;                                              \
2801     int i;                                                         \
2802                                                                    \
2803     getVSR(xB(opcode), &xb, env);                                  \
2804     memset(&xt, 0, sizeof(xt));                                    \
2805                                                                    \
2806     for (i = 0; i < nels; i++) {                                   \
2807         xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
2808         if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
2809                                             &env->fp_status))) {   \
2810             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
2811             xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
2812         }                                                          \
2813         if (sfprf) {                                               \
2814             helper_compute_fprf_##ttp(env, xt.tfld);               \
2815         }                                                          \
2816     }                                                              \
2817                                                                    \
2818     putVSR(xT(opcode), &xt, env);                                  \
2819     float_check_status(env);                                       \
2820 }
2821 
2822 VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2823 VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2824 VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
2825 VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
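
/*
 * The extra "1" passed to the conversion routines above is softfloat's
 * "ieee" argument for the float16 converters, selecting the IEEE 754
 * half-precision encoding.  The target is cleared with memset() first
 * because each element writes only part of the result register (one
 * half-word or word per iteration).
 */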
2826 
2827 /*
2828  * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
2829  * added to this later.
2830  */
2831 void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
2832 {
2833     ppc_vsr_t xt, xb;
2834     float_status tstat;
2835 
2836     getVSR(rB(opcode) + 32, &xb, env);
2837     memset(&xt, 0, sizeof(xt));
2838 
2839     tstat = env->fp_status;
2840     if (unlikely(Rc(opcode) != 0)) {
2841         tstat.float_rounding_mode = float_round_to_odd;
2842     }
2843 
2844     xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
2845     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2846     if (unlikely(float128_is_signaling_nan(xb.f128,
2847                                            &tstat))) {
2848         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
2849         xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
2850     }
2851     helper_compute_fprf_float64(env, xt.VsrD(0));
2852 
2853     putVSR(rD(opcode) + 32, &xt, env);
2854     float_check_status(env);
2855 }
2856 
2857 uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2858 {
2859     float_status tstat = env->fp_status;
2860     set_float_exception_flags(0, &tstat);
2861 
2862     return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2863 }
2864 
2865 uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2866 {
2867     float_status tstat = env->fp_status;
2868     set_float_exception_flags(0, &tstat);
2869 
2870     return float32_to_float64(xb >> 32, &tstat);
2871 }
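
/*
 * xscvdpspn/xscvspdpn are the "non-signalling" converts: they run the
 * conversion on a scratch float_status with the exception flags cleared
 * and never merge the flags back, so the FPSCR is left untouched.
 * xscvdpspn returns the single-precision image shifted into the upper
 * 32 bits of the doubleword.
 */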
2872 
2873 /* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
2874  *   op    - instruction mnemonic
2875  *   nels  - number of elements (1, 2 or 4)
2876  *   stp   - source type (float32 or float64)
2877  *   ttp   - target type (int32, uint32, int64 or uint64)
2878  *   sfld  - source vsr_t field
2879  *   tfld  - target vsr_t field
2880  *   rnan  - resulting NaN
2881  */
2882 #define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
2883 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2884 {                                                                            \
2885     ppc_vsr_t xt, xb;                                                        \
2886     int i;                                                                   \
2887                                                                              \
2888     getVSR(xB(opcode), &xb, env);                                            \
2889     getVSR(xT(opcode), &xt, env);                                            \
2890                                                                              \
2891     for (i = 0; i < nels; i++) {                                             \
2892         if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
2893             if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
2894                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
2895             }                                                                \
2896             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2897             xt.tfld = rnan;                                                  \
2898         } else {                                                             \
2899             xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
2900                           &env->fp_status);                                  \
2901             if (env->fp_status.float_exception_flags & float_flag_invalid) { \
2902                 float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
2903             }                                                                \
2904         }                                                                    \
2905     }                                                                        \
2906                                                                              \
2907     putVSR(xT(opcode), &xt, env);                                            \
2908     float_check_status(env);                                                 \
2909 }
2910 
2911 VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
2912                   0x8000000000000000ULL)
2913 VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
2914                   0x80000000U)
2915 VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
2916 VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
2917 VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
2918                   0x8000000000000000ULL)
2919 VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
2920                   0x80000000U)
2921 VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
2922 VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
2923 VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
2924                   0x8000000000000000ULL)
2925 VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
2926 VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
2927 VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
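
/*
 * Example expansion: VSX_CVT_FP_TO_INT(xscvdpsxws, ...) ends up doing
 * roughly
 *
 *     xt.VsrW(1) = float64_to_int32_round_to_zero(xb.VsrD(0),
 *                                                 &env->fp_status);
 *
 * NaN inputs raise VXCVI (plus VXSNAN for signaling NaNs) and produce the
 * per-instruction "rnan" pattern; out-of-range values are caught by
 * checking float_flag_invalid after the conversion and also raise VXCVI.
 */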
2928 
2929 /* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
2930  *   op    - instruction mnemonic
2931  *   stp   - source type (float128)
2932  *   ttp   - target type (int32, uint32, int64 or uint64)
2933  *   sfld  - source vsr_t field
2934  *   tfld  - target vsr_t field
2935  *   rnan  - resulting NaN
2936  */
2937 #define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
2938 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
2939 {                                                                            \
2940     ppc_vsr_t xt, xb;                                                        \
2941                                                                              \
2942     getVSR(rB(opcode) + 32, &xb, env);                                       \
2943     memset(&xt, 0, sizeof(xt));                                              \
2944                                                                              \
2945     if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
2946         if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
2947             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
2948         }                                                                    \
2949         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
2950         xt.tfld = rnan;                                                      \
2951     } else {                                                                 \
2952         xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
2953                       &env->fp_status);                                      \
2954         if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
2955             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
2956         }                                                                    \
2957     }                                                                        \
2958                                                                              \
2959     putVSR(rD(opcode) + 32, &xt, env);                                       \
2960     float_check_status(env);                                                 \
2961 }
2962 
2963 VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
2964                   0x8000000000000000ULL)
2965 
2966 VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
2967                   0xffffffff80000000ULL)
2968 VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2969 VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
2970 
2971 /* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
2972  *   op    - instruction mnemonic
2973  *   nels  - number of elements (1, 2 or 4)
2974  *   stp   - source type (int32, uint32, int64 or uint64)
2975  *   ttp   - target type (float32 or float64)
2976  *   sfld  - source vsr_t field
2977  *   tfld  - target vsr_t field
2978  *   sfprf - set FPRF
2979  *   r2sp  - round the intermediate double-precision result to single precision
2980  */
2981 #define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
2982 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
2983 {                                                                       \
2984     ppc_vsr_t xt, xb;                                                   \
2985     int i;                                                              \
2986                                                                         \
2987     getVSR(xB(opcode), &xb, env);                                       \
2988     getVSR(xT(opcode), &xt, env);                                       \
2989                                                                         \
2990     for (i = 0; i < nels; i++) {                                        \
2991         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
2992         if (r2sp) {                                                     \
2993             xt.tfld = helper_frsp(env, xt.tfld);                        \
2994         }                                                               \
2995         if (sfprf) {                                                    \
2996             helper_compute_fprf_float64(env, xt.tfld);                  \
2997         }                                                               \
2998     }                                                                   \
2999                                                                         \
3000     putVSR(xT(opcode), &xt, env);                                       \
3001     float_check_status(env);                                            \
3002 }
3003 
3004 VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
3005 VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
3006 VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
3007 VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
3008 VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
3009 VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
3010 VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
3011 VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
3012 VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
3013 VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
3014 VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
3015 VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
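
/*
 * The r2sp variants above (xscvsxdsp/xscvuxdsp) convert to double
 * precision first and then round through helper_frsp(), so the result is
 * a single-precision value kept in double-precision format, as the
 * scalar single-precision VSX instructions require.
 */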
3016 
3017 /* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3018  *   op    - instruction mnemonic
3019  *   stp   - source type (int32, uint32, int64 or uint64)
3020  *   ttp   - target type (float128)
3021  *   sfld  - source vsr_t field
3022  *   tfld  - target vsr_t field
3023  */
3024 #define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3025 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
3026 {                                                                       \
3027     ppc_vsr_t xt, xb;                                                   \
3028                                                                         \
3029     getVSR(rB(opcode) + 32, &xb, env);                                  \
3030     getVSR(rD(opcode) + 32, &xt, env);                                  \
3031                                                                         \
3032     xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
3033     helper_compute_fprf_##ttp(env, xt.tfld);                            \
3034                                                                         \
3035     putVSR(rD(opcode) + 32, &xt, env);                                  \
3036     float_check_status(env);                                            \
3037 }
3038 
3039 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3040 VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3041 
3042 /* For "use current rounding mode", define a value that will not be one of
3043  * the existing rounding mode enums.
3044  */
3045 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
3046   float_round_up + float_round_to_zero)
3047 
3048 /* VSX_ROUND - VSX floating point round
3049  *   op    - instruction mnemonic
3050  *   nels  - number of elements (1, 2 or 4)
3051  *   tp    - type (float32 or float64)
3052  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3053  *   rmode - rounding mode
3054  *   sfprf - set FPRF
3055  */
3056 #define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
3057 void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
3058 {                                                                      \
3059     ppc_vsr_t xt, xb;                                                  \
3060     int i;                                                             \
3061     getVSR(xB(opcode), &xb, env);                                      \
3062     getVSR(xT(opcode), &xt, env);                                      \
3063                                                                        \
3064     if (rmode != FLOAT_ROUND_CURRENT) {                                \
3065         set_float_rounding_mode(rmode, &env->fp_status);               \
3066     }                                                                  \
3067                                                                        \
3068     for (i = 0; i < nels; i++) {                                       \
3069         if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
3070                                            &env->fp_status))) {        \
3071             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
3072             xt.fld = tp##_snan_to_qnan(xb.fld);                        \
3073         } else {                                                       \
3074             xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
3075         }                                                              \
3076         if (sfprf) {                                                   \
3077             helper_compute_fprf_float64(env, xt.fld);                  \
3078         }                                                              \
3079     }                                                                  \
3080                                                                        \
3081     /* If this is not a "use current rounding mode" instruction,       \
3082      * then inhibit setting of the XX bit and restore rounding         \
3083      * mode from FPSCR */                                              \
3084     if (rmode != FLOAT_ROUND_CURRENT) {                                \
3085         fpscr_set_rounding_mode(env);                                  \
3086         env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
3087     }                                                                  \
3088                                                                        \
3089     putVSR(xT(opcode), &xt, env);                                      \
3090     float_check_status(env);                                           \
3091 }
3092 
3093 VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3094 VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3095 VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3096 VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3097 VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3098 
3099 VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3100 VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3101 VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3102 VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3103 VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
3104 
3105 VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
3106 VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
3107 VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
3108 VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
3109 VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
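
/*
 * For example, xsrdpiz expands to float64_round_to_int() with the
 * rounding mode temporarily forced to float_round_to_zero; afterwards the
 * mode is restored from FPSCR and float_flag_inexact is cleared, so only
 * the "use current rounding mode" forms (xsrdpic, xvrdpic, xvrspic) can
 * set XX.
 */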
3110 
3111 uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3112 {
3113     helper_reset_fpstatus(env);
3114 
3115     uint64_t xt = helper_frsp(env, xb);
3116 
3117     helper_compute_fprf_float64(env, xt);
3118     float_check_status(env);
3119     return xt;
3120 }
3121 
3122 #define VSX_XXPERM(op, indexed)                                       \
3123 void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
3124 {                                                                     \
3125     ppc_vsr_t xt, xa, pcv, xto;                                       \
3126     int i, idx;                                                       \
3127                                                                       \
3128     getVSR(xA(opcode), &xa, env);                                     \
3129     getVSR(xT(opcode), &xt, env);                                     \
3130     getVSR(xB(opcode), &pcv, env);                                    \
3131                                                                       \
3132     for (i = 0; i < 16; i++) {                                        \
3133         idx = pcv.VsrB(i) & 0x1F;                                     \
3134         if (indexed) {                                                \
3135             idx = 31 - idx;                                           \
3136         }                                                             \
3137         xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
3138     }                                                                 \
3139     putVSR(xT(opcode), &xto, env);                                    \
3140 }
3141 
3142 VSX_XXPERM(xxperm, 0)
3143 VSX_XXPERM(xxpermr, 1)
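
/*
 * In the permutes above each byte of the permute control vector (taken
 * from VSR[XB]) selects one source byte: indices 0-15 pick from VSR[XA]
 * and 16-31 from VSR[XT]; xxpermr complements the index (31 - idx) to
 * implement the right-indexed variant.
 */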
3144 
3145 void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
3146 {
3147     ppc_vsr_t xt, xb;
3148     uint32_t exp, i, fraction;
3149 
3150     getVSR(xB(opcode), &xb, env);
3151     memset(&xt, 0, sizeof(xt));
3152 
3153     for (i = 0; i < 4; i++) {
3154         exp = (xb.VsrW(i) >> 23) & 0xFF;
3155         fraction = xb.VsrW(i) & 0x7FFFFF;
3156         if (exp != 0 && exp != 255) {
3157             xt.VsrW(i) = fraction | 0x00800000;
3158         } else {
3159             xt.VsrW(i) = fraction;
3160         }
3161     }
3162     putVSR(xT(opcode), &xt, env);
3163 }
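
/*
 * Worked example for xvxsigsp: for the word 0x3F800000 (1.0f) the
 * exponent is 0x7F and the fraction is 0, so the extracted significand is
 * 0x00800000, i.e. the implicit integer bit made explicit.  Zeros,
 * denormals, infinities and NaNs (exponent 0 or 255) keep only the raw
 * fraction bits.
 */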
3164 
3165 /* VSX_TEST_DC - VSX floating point test data class
3166  *   op    - instruction mnemonic
3167  *   nels  - number of elements (1, 2 or 4)
3168  *   xbn   - VSR register number
3169  *   tp    - type (float32 or float64)
3170  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
3171  *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
3172  *   fld_max - target field max
3173  *   scrf - set result in CR and FPCC
3174  */
3175 #define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
3176 void helper_##op(CPUPPCState *env, uint32_t opcode)         \
3177 {                                                           \
3178     ppc_vsr_t xt, xb;                                       \
3179     uint32_t i, sign, dcmx;                                 \
3180     uint32_t cc, match = 0;                                 \
3181                                                             \
3182     getVSR(xbn, &xb, env);                                  \
3183     if (!scrf) {                                            \
3184         memset(&xt, 0, sizeof(xt));                         \
3185         dcmx = DCMX_XV(opcode);                             \
3186     } else {                                                \
3187         dcmx = DCMX(opcode);                                \
3188     }                                                       \
3189                                                             \
3190     for (i = 0; i < nels; i++) {                            \
3191         sign = tp##_is_neg(xb.fld);                         \
3192         if (tp##_is_any_nan(xb.fld)) {                      \
3193             match = extract32(dcmx, 6, 1);                  \
3194         } else if (tp##_is_infinity(xb.fld)) {              \
3195             match = extract32(dcmx, 4 + !sign, 1);          \
3196         } else if (tp##_is_zero(xb.fld)) {                  \
3197             match = extract32(dcmx, 2 + !sign, 1);          \
3198         } else if (tp##_is_zero_or_denormal(xb.fld)) {      \
3199             match = extract32(dcmx, 0 + !sign, 1);          \
3200         }                                                   \
3201                                                             \
3202         if (scrf) {                                         \
3203             cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
3204             env->fpscr &= ~(0x0F << FPSCR_FPRF);            \
3205             env->fpscr |= cc << FPSCR_FPRF;                 \
3206             env->crf[BF(opcode)] = cc;                      \
3207         } else {                                            \
3208             xt.tfld = match ? fld_max : 0;                  \
3209         }                                                   \
3210         match = 0;                                          \
3211     }                                                       \
3212     if (!scrf) {                                            \
3213         putVSR(xT(opcode), &xt, env);                       \
3214     }                                                       \
3215 }
3216 
3217 VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
3218 VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
3219 VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
3220 VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
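
/*
 * The DCMX mask bits tested above are, reading from the extract32()
 * calls: bit 6 = NaN, bit 5 = +Infinity, bit 4 = -Infinity,
 * bit 3 = +Zero, bit 2 = -Zero, bit 1 = +Denormal, bit 0 = -Denormal.
 * The scalar (scrf) forms put the result in CR[BF] and FPCC instead of
 * writing an all-ones/all-zeros mask to the target VSR.
 */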
3221 
3222 void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
3223 {
3224     ppc_vsr_t xb;
3225     uint32_t dcmx, sign, exp;
3226     uint32_t cc, match = 0, not_sp = 0;
3227 
3228     getVSR(xB(opcode), &xb, env);
3229     dcmx = DCMX(opcode);
3230     exp = (xb.VsrD(0) >> 52) & 0x7FF;
3231 
3232     sign = float64_is_neg(xb.VsrD(0));
3233     if (float64_is_any_nan(xb.VsrD(0))) {
3234         match = extract32(dcmx, 6, 1);
3235     } else if (float64_is_infinity(xb.VsrD(0))) {
3236         match = extract32(dcmx, 4 + !sign, 1);
3237     } else if (float64_is_zero(xb.VsrD(0))) {
3238         match = extract32(dcmx, 2 + !sign, 1);
3239     } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
3240                (exp > 0 && exp < 0x381)) {
3241         match = extract32(dcmx, 0 + !sign, 1);
3242     }
3243 
3244     not_sp = !float64_eq(xb.VsrD(0),
3245                          float32_to_float64(
3246                              float64_to_float32(xb.VsrD(0), &env->fp_status),
3247                              &env->fp_status), &env->fp_status);
3248 
3249     cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
3250     env->fpscr &= ~(0x0F << FPSCR_FPRF);
3251     env->fpscr |= cc << FPSCR_FPRF;
3252     env->crf[BF(opcode)] = cc;
3253 }
3254 
3255 void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
3256 {
3257     ppc_vsr_t xb;
3258     ppc_vsr_t xt;
3259     uint8_t r = Rrm(opcode);
3260     uint8_t ex = Rc(opcode);
3261     uint8_t rmc = RMC(opcode);
3262     uint8_t rmode = 0;
3263     float_status tstat;
3264 
3265     getVSR(rB(opcode) + 32, &xb, env);
3266     memset(&xt, 0, sizeof(xt));
3267     helper_reset_fpstatus(env);
3268 
3269     if (r == 0 && rmc == 0) {
3270         rmode = float_round_ties_away;
3271     } else if (r == 0 && rmc == 0x3) {
3272         rmode = fpscr_rn;
3273     } else if (r == 1) {
3274         switch (rmc) {
3275         case 0:
3276             rmode = float_round_nearest_even;
3277             break;
3278         case 1:
3279             rmode = float_round_to_zero;
3280             break;
3281         case 2:
3282             rmode = float_round_up;
3283             break;
3284         case 3:
3285             rmode = float_round_down;
3286             break;
3287         default:
3288             abort();
3289         }
3290     }
3291 
3292     tstat = env->fp_status;
3293     set_float_exception_flags(0, &tstat);
3294     set_float_rounding_mode(rmode, &tstat);
3295     xt.f128 = float128_round_to_int(xb.f128, &tstat);
3296     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3297 
3298     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3299         if (float128_is_signaling_nan(xb.f128, &tstat)) {
3300             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3301             xt.f128 = float128_snan_to_qnan(xt.f128);
3302         }
3303     }
3304 
3305     if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
3306         env->fp_status.float_exception_flags &= ~float_flag_inexact;
3307     }
3308 
3309     helper_compute_fprf_float128(env, xt.f128);
3310     float_check_status(env);
3311     putVSR(rD(opcode) + 32, &xt, env);
3312 }
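
/*
 * Rounding-mode selection above, decoded from the R and RMC fields:
 *   R=0, RMC=0  -> round to nearest, ties away from zero
 *   R=0, RMC=3  -> take the mode from FPSCR[RN]
 *   R=1, RMC=0  -> round to nearest even
 *   R=1, RMC=1  -> round toward zero
 *   R=1, RMC=2  -> round toward +infinity
 *   R=1, RMC=3  -> round toward -infinity
 * With EX (the Rc bit here) clear, the inexact flag is dropped so XX is
 * not set; the EX=1 form preserves it.  helper_xsrqpxp below uses the
 * same decode.
 */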
3313 
3314 void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
3315 {
3316     ppc_vsr_t xb;
3317     ppc_vsr_t xt;
3318     uint8_t r = Rrm(opcode);
3319     uint8_t rmc = RMC(opcode);
3320     uint8_t rmode = 0;
3321     floatx80 round_res;
3322     float_status tstat;
3323 
3324     getVSR(rB(opcode) + 32, &xb, env);
3325     memset(&xt, 0, sizeof(xt));
3326     helper_reset_fpstatus(env);
3327 
3328     if (r == 0 && rmc == 0) {
3329         rmode = float_round_ties_away;
3330     } else if (r == 0 && rmc == 0x3) {
3331         rmode = fpscr_rn;
3332     } else if (r == 1) {
3333         switch (rmc) {
3334         case 0:
3335             rmode = float_round_nearest_even;
3336             break;
3337         case 1:
3338             rmode = float_round_to_zero;
3339             break;
3340         case 2:
3341             rmode = float_round_up;
3342             break;
3343         case 3:
3344             rmode = float_round_down;
3345             break;
3346         default:
3347             abort();
3348         }
3349     }
3350 
3351     tstat = env->fp_status;
3352     set_float_exception_flags(0, &tstat);
3353     set_float_rounding_mode(rmode, &tstat);
3354     round_res = float128_to_floatx80(xb.f128, &tstat);
3355     xt.f128 = floatx80_to_float128(round_res, &tstat);
3356     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3357 
3358     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3359         if (float128_is_signaling_nan(xb.f128, &tstat)) {
3360             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
3361             xt.f128 = float128_snan_to_qnan(xt.f128);
3362         }
3363     }
3364 
3365     helper_compute_fprf_float128(env, xt.f128);
3366     putVSR(rD(opcode) + 32, &xt, env);
3367     float_check_status(env);
3368 }
3369 
3370 void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
3371 {
3372     ppc_vsr_t xb;
3373     ppc_vsr_t xt;
3374     float_status tstat;
3375 
3376     getVSR(rB(opcode) + 32, &xb, env);
3377     memset(&xt, 0, sizeof(xt));
3378     helper_reset_fpstatus(env);
3379 
3380     tstat = env->fp_status;
3381     if (unlikely(Rc(opcode) != 0)) {
3382         tstat.float_rounding_mode = float_round_to_odd;
3383     }
3384 
3385     set_float_exception_flags(0, &tstat);
3386     xt.f128 = float128_sqrt(xb.f128, &tstat);
3387     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3388 
3389     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3390         if (float128_is_signaling_nan(xb.f128, &tstat)) {
3391             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3392             xt.f128 = float128_snan_to_qnan(xb.f128);
3393         } else if (float128_is_quiet_nan(xb.f128, &tstat)) {
3394             xt.f128 = xb.f128;
3395         } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
3396             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
3397             set_snan_bit_is_one(0, &env->fp_status);
3398             xt.f128 = float128_default_nan(&env->fp_status);
3399         }
3400     }
3401 
3402     helper_compute_fprf_float128(env, xt.f128);
3403     putVSR(rD(opcode) + 32, &xt, env);
3404     float_check_status(env);
3405 }
3406 
3407 void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
3408 {
3409     ppc_vsr_t xt, xa, xb;
3410     float_status tstat;
3411 
3412     getVSR(rA(opcode) + 32, &xa, env);
3413     getVSR(rB(opcode) + 32, &xb, env);
3414     getVSR(rD(opcode) + 32, &xt, env);
3415     helper_reset_fpstatus(env);
3416 
3417     tstat = env->fp_status;
3418     if (unlikely(Rc(opcode) != 0)) {
3419         tstat.float_rounding_mode = float_round_to_odd;
3420     }
3421 
3422     set_float_exception_flags(0, &tstat);
3423     xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
3424     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3425 
3426     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3427         if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
3428             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
3429         } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
3430                    float128_is_signaling_nan(xb.f128, &tstat)) {
3431             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3432         }
3433     }
3434 
3435     helper_compute_fprf_float128(env, xt.f128);
3436     putVSR(rD(opcode) + 32, &xt, env);
3437     float_check_status(env);
3438 }
3439