xref: /openbmc/qemu/target/ppc/fpu_helper.c (revision 3d69b95e5e2ef4c5acfa2b2aaa93e2fad9eeeaa1)
1 /*
2  *  PowerPC floating point and SPE emulation helpers for QEMU.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 #include "cpu.h"
21 #include "exec/helper-proto.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 #include "fpu/softfloat.h"
25 
26 static inline float128 float128_snan_to_qnan(float128 x)
27 {
28     float128 r;
29 
30     r.high = x.high | 0x0000800000000000;
31     r.low = x.low;
32     return r;
33 }
34 
/*
 * Quiet an sNaN image by setting the most-significant fraction bit
 * (the IEEE "quiet" bit) at each format's bit position.
 */
#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
38 
39 /*****************************************************************************/
40 /* Floating point operations helpers */
41 uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
42 {
43     CPU_FloatU f;
44     CPU_DoubleU d;
45 
46     f.l = arg;
47     d.d = float32_to_float64(f.f, &env->fp_status);
48     return d.ll;
49 }
50 
51 uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
52 {
53     CPU_FloatU f;
54     CPU_DoubleU d;
55 
56     d.ll = arg;
57     f.f = float64_to_float32(d.d, &env->fp_status);
58     return f.l;
59 }
60 
61 static inline int ppc_float32_get_unbiased_exp(float32 f)
62 {
63     return ((f >> 23) & 0xFF) - 127;
64 }
65 
66 static inline int ppc_float64_get_unbiased_exp(float64 f)
67 {
68     return ((f >> 52) & 0x7FF) - 1023;
69 }
70 
/*
 * COMPUTE_FPRF - generate helper_compute_fprf_<tp>() for one softfloat type.
 *
 * The generated helper classifies @arg and writes the 5-bit result class
 * code into FPSCR[FPRF] (C plus FPCC).  Codes produced below:
 *   0x11 QNaN, 0x09 -inf, 0x05 +inf, 0x12 -zero, 0x02 +zero,
 *   0x18 -denormal, 0x14 +denormal, 0x08 -normal, 0x04 +normal.
 * For a signaling NaN the flags are architecturally undefined; 0x00 is used.
 */
#define COMPUTE_FPRF(tp)                                       \
void helper_compute_fprf_##tp(CPUPPCState *env, tp arg)        \
{                                                              \
    int isneg;                                                 \
    int fprf;                                                  \
                                                               \
    isneg = tp##_is_neg(arg);                                  \
    if (unlikely(tp##_is_any_nan(arg))) {                      \
        if (tp##_is_signaling_nan(arg, &env->fp_status)) {     \
            /* Signaling NaN: flags are undefined */           \
            fprf = 0x00;                                       \
        } else {                                               \
            /* Quiet NaN */                                    \
            fprf = 0x11;                                       \
        }                                                      \
    } else if (unlikely(tp##_is_infinity(arg))) {              \
        /* +/- infinity */                                     \
        if (isneg) {                                           \
            fprf = 0x09;                                       \
        } else {                                               \
            fprf = 0x05;                                       \
        }                                                      \
    } else {                                                   \
        if (tp##_is_zero(arg)) {                               \
            /* +/- zero */                                     \
            if (isneg) {                                       \
                fprf = 0x12;                                   \
            } else {                                           \
                fprf = 0x02;                                   \
            }                                                  \
        } else {                                               \
            if (tp##_is_zero_or_denormal(arg)) {               \
                /* Denormalized numbers */                     \
                fprf = 0x10;                                   \
            } else {                                           \
                /* Normalized numbers */                       \
                fprf = 0x00;                                   \
            }                                                  \
            if (isneg) {                                       \
                fprf |= 0x08;                                  \
            } else {                                           \
                fprf |= 0x04;                                  \
            }                                                  \
        }                                                      \
    }                                                          \
    /* We update FPSCR_FPRF */                                 \
    env->fpscr &= ~(0x1F << FPSCR_FPRF);                       \
    env->fpscr |= fprf << FPSCR_FPRF;                          \
}

/* One classification helper per supported format. */
COMPUTE_FPRF(float16)
COMPUTE_FPRF(float32)
COMPUTE_FPRF(float64)
COMPUTE_FPRF(float128)
125 
/* Floating-point invalid operations exception */
/*
 * Record an invalid-operation exception of kind @op (a POWERPC_EXCP_FP_VX*
 * value) in the FPSCR.  When @set_fpcc is non-zero, FPCC is also set to the
 * "QNaN result" pattern for the arithmetic-class causes.  Returns the
 * default quiet-NaN image to store in the target FPR when the exception is
 * disabled (VE clear), 0 otherwise.  If VE is set and MSR[FE0|FE1] enable
 * FP exceptions, a program interrupt is raised immediately; must be inline
 * so that GETPC() yields the caller's return address for the unwind.
 */
static inline __attribute__((__always_inline__))
uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        if (set_fpcc) {
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred: raised later by float_check_status() */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
        /* flows into the shared arithmetic-class handling below */
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            if (set_fpcc) {
                env->fpscr &= ~(0xF << FPSCR_FPCC);
                env->fpscr |= 0x11 << FPSCR_FPCC;
            }
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= FP_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            /* GETPC() works here because this is inline */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_FP | op, GETPC());
        }
    }
    return ret;
}
218 
219 static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
220 {
221     env->fpscr |= 1 << FPSCR_ZX;
222     env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
223     /* Update the floating-point exception summary */
224     env->fpscr |= FP_FX;
225     if (fpscr_ze != 0) {
226         /* Update the floating-point enabled exception summary */
227         env->fpscr |= 1 << FPSCR_FEX;
228         if (msr_fe0 != 0 || msr_fe1 != 0) {
229             raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
230                                    POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
231                                    raddr);
232         }
233     }
234 }
235 
236 static inline void float_overflow_excp(CPUPPCState *env)
237 {
238     CPUState *cs = CPU(ppc_env_get_cpu(env));
239 
240     env->fpscr |= 1 << FPSCR_OX;
241     /* Update the floating-point exception summary */
242     env->fpscr |= FP_FX;
243     if (fpscr_oe != 0) {
244         /* XXX: should adjust the result */
245         /* Update the floating-point enabled exception summary */
246         env->fpscr |= 1 << FPSCR_FEX;
247         /* We must update the target FPR before raising the exception */
248         cs->exception_index = POWERPC_EXCP_PROGRAM;
249         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
250     } else {
251         env->fpscr |= 1 << FPSCR_XX;
252         env->fpscr |= 1 << FPSCR_FI;
253     }
254 }
255 
256 static inline void float_underflow_excp(CPUPPCState *env)
257 {
258     CPUState *cs = CPU(ppc_env_get_cpu(env));
259 
260     env->fpscr |= 1 << FPSCR_UX;
261     /* Update the floating-point exception summary */
262     env->fpscr |= FP_FX;
263     if (fpscr_ue != 0) {
264         /* XXX: should adjust the result */
265         /* Update the floating-point enabled exception summary */
266         env->fpscr |= 1 << FPSCR_FEX;
267         /* We must update the target FPR before raising the exception */
268         cs->exception_index = POWERPC_EXCP_PROGRAM;
269         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
270     }
271 }
272 
273 static inline void float_inexact_excp(CPUPPCState *env)
274 {
275     CPUState *cs = CPU(ppc_env_get_cpu(env));
276 
277     env->fpscr |= 1 << FPSCR_XX;
278     /* Update the floating-point exception summary */
279     env->fpscr |= FP_FX;
280     if (fpscr_xe != 0) {
281         /* Update the floating-point enabled exception summary */
282         env->fpscr |= 1 << FPSCR_FEX;
283         /* We must update the target FPR before raising the exception */
284         cs->exception_index = POWERPC_EXCP_PROGRAM;
285         env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
286     }
287 }
288 
289 static inline void fpscr_set_rounding_mode(CPUPPCState *env)
290 {
291     int rnd_type;
292 
293     /* Set rounding mode */
294     switch (fpscr_rn) {
295     case 0:
296         /* Best approximation (round to nearest) */
297         rnd_type = float_round_nearest_even;
298         break;
299     case 1:
300         /* Smaller magnitude (round toward zero) */
301         rnd_type = float_round_to_zero;
302         break;
303     case 2:
304         /* Round toward +infinite */
305         rnd_type = float_round_up;
306         break;
307     default:
308     case 3:
309         /* Round toward -infinite */
310         rnd_type = float_round_down;
311         break;
312     }
313     set_float_rounding_mode(rnd_type, &env->fp_status);
314 }
315 
/*
 * Clear bit @bit of the FPSCR.  On a 1->0 transition keep the derived
 * state coherent: re-program the rounding mode when RN changes, recompute
 * the VX summary when an invalid-operation status bit is cleared, and
 * recompute the FEX summary when an exception status or enable bit is
 * cleared.
 */
void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        /* Any invalid-operation status bit: VX may need to drop. */
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            if (!fpscr_ix) {
                /* Set VX bit to zero */
                env->fpscr &= ~(1 << FPSCR_VX);
            }
            break;
        /* Any exception status or enable bit: FEX may need to drop. */
        case FPSCR_OX:
        case FPSCR_UX:
        case FPSCR_ZX:
        case FPSCR_XX:
        case FPSCR_VE:
        case FPSCR_OE:
        case FPSCR_UE:
        case FPSCR_ZE:
        case FPSCR_XE:
            if (!fpscr_eex) {
                /* Set the FEX bit */
                env->fpscr &= ~(1 << FPSCR_FEX);
            }
            break;
        default:
            break;
        }
    }
}
361 
/*
 * Set bit @bit of the FPSCR.  On a 0->1 transition propagate the change:
 * raise FX (and VX for invalid-operation causes), and when the matching
 * enable bit is — or just became — set, record FEX and prepare a deferred
 * program interrupt (the trap itself is taken later, after the target
 * register has been updated).  Setting RN re-programs the rounding mode.
 */
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= FP_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= FP_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= FP_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= FP_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= FP_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        /* Any specific invalid-operation cause also sets the VX summary. */
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= FP_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                /* Collect every pending invalid cause into error_code. */
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode(env);
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            cs->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
493 
/*
 * Store @arg into the FPSCR, writing only the 4-bit nibbles selected by
 * @mask (bit i of @mask enables nibble i).  The derived summary bits FEX
 * and VX (0x60000000) are never taken from @arg; they are preserved and
 * then recomputed from the exception/enable bits, after which the
 * softfloat rounding mode is re-programmed from RN.
 */
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong prev, new;
    int i;

    prev = env->fpscr;
    new = (target_ulong)arg;
    /* FEX and VX are computed below, not copied from the source value. */
    new &= ~0x60000000LL;
    new |= prev & 0x60000000LL;
    for (i = 0; i < sizeof(target_ulong) * 2; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xFLL << (4 * i));
            env->fpscr |= new & (0xFLL << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        cs->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
526 
/*
 * Non-helper entry point for code outside the TCG helper path; see
 * helper_store_fpscr() for the @mask semantics.
 */
void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    helper_store_fpscr(env, arg, mask);
}
531 
532 static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
533 {
534     CPUState *cs = CPU(ppc_env_get_cpu(env));
535     int status = get_float_exception_flags(&env->fp_status);
536 
537     if (status & float_flag_divbyzero) {
538         float_zero_divide_excp(env, raddr);
539     } else if (status & float_flag_overflow) {
540         float_overflow_excp(env);
541     } else if (status & float_flag_underflow) {
542         float_underflow_excp(env);
543     } else if (status & float_flag_inexact) {
544         float_inexact_excp(env);
545     }
546 
547     if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
548         (env->error_code & POWERPC_EXCP_FP)) {
549         /* Differred floating-point exception after target FPR update */
550         if (msr_fe0 != 0 || msr_fe1 != 0) {
551             raise_exception_err_ra(env, cs->exception_index,
552                                    env->error_code, raddr);
553         }
554     }
555 }
556 
/*
 * Inline wrapper around do_float_check_status(); forced inlining makes
 * GETPC() return the helper caller's address, which is required for a
 * correct unwind if an exception is raised.
 */
static inline  __attribute__((__always_inline__))
void float_check_status(CPUPPCState *env)
{
    /* GETPC() works here because this is inline */
    do_float_check_status(env, GETPC());
}
563 
/* TCG helper: fold pending softfloat flags into FPSCR and raise traps. */
void helper_float_check_status(CPUPPCState *env)
{
    do_float_check_status(env, GETPC());
}
568 
/* Clear the accumulated softfloat exception flags before a new FP op. */
void helper_reset_fpstatus(CPUPPCState *env)
{
    set_float_exception_flags(0, &env->fp_status);
}
573 
574 /* fadd - fadd. */
575 uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
576 {
577     CPU_DoubleU farg1, farg2;
578 
579     farg1.ll = arg1;
580     farg2.ll = arg2;
581 
582     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
583                  float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
584         /* Magnitude subtraction of infinities */
585         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
586     } else {
587         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
588                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
589             /* sNaN addition */
590             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
591         }
592         farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
593     }
594 
595     return farg1.ll;
596 }
597 
598 /* fsub - fsub. */
599 uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
600 {
601     CPU_DoubleU farg1, farg2;
602 
603     farg1.ll = arg1;
604     farg2.ll = arg2;
605 
606     if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
607                  float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
608         /* Magnitude subtraction of infinities */
609         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
610     } else {
611         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
612                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
613             /* sNaN subtraction */
614             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
615         }
616         farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
617     }
618 
619     return farg1.ll;
620 }
621 
622 /* fmul - fmul. */
623 uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
624 {
625     CPU_DoubleU farg1, farg2;
626 
627     farg1.ll = arg1;
628     farg2.ll = arg2;
629 
630     if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
631                  (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
632         /* Multiplication of zero by infinity */
633         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
634     } else {
635         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
636                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
637             /* sNaN multiplication */
638             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
639         }
640         farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
641     }
642 
643     return farg1.ll;
644 }
645 
646 /* fdiv - fdiv. */
647 uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
648 {
649     CPU_DoubleU farg1, farg2;
650 
651     farg1.ll = arg1;
652     farg2.ll = arg2;
653 
654     if (unlikely(float64_is_infinity(farg1.d) &&
655                  float64_is_infinity(farg2.d))) {
656         /* Division of infinity by infinity */
657         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
658     } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
659         /* Division of zero by zero */
660         farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
661     } else {
662         if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
663                      float64_is_signaling_nan(farg2.d, &env->fp_status))) {
664             /* sNaN division */
665             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
666         }
667         farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
668     }
669 
670     return farg1.ll;
671 }
672 
673 
/*
 * FPU_FCTI - generate a float64 -> integer conversion helper.
 * @cvt names the softfloat conversion (choosing width, signedness and
 * whether FPSCR[RN] or truncation is used); @nanval is the value forced
 * into the result for NaN input.  NaN or out-of-range input raises
 * VXCVI, plus VXSNAN when the input is a signaling NaN.
 */
#define FPU_FCTI(op, cvt, nanval)                                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)                   \
{                                                                      \
    CPU_DoubleU farg;                                                  \
                                                                       \
    farg.ll = arg;                                                     \
    farg.ll = float64_to_##cvt(farg.d, &env->fp_status);               \
                                                                       \
    if (unlikely(env->fp_status.float_exception_flags)) {              \
        if (float64_is_any_nan(arg)) {                                 \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
            if (float64_is_signaling_nan(arg, &env->fp_status)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
            }                                                          \
            farg.ll = nanval;                                          \
        } else if (env->fp_status.float_exception_flags &              \
                   float_flag_invalid) {                               \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1);      \
        }                                                              \
        float_check_status(env);                                       \
    }                                                                  \
    return farg.ll;                                                    \
 }

/* fcti*: NaN results saturate to INT_MIN/INT64_MIN (signed) or 0 (unsigned). */
FPU_FCTI(fctiw, int32, 0x80000000U)
FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
FPU_FCTI(fctiwu, uint32, 0x00000000U)
FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
706 
/*
 * FPU_FCFI - generate an integer -> float64 conversion helper.
 * When @is_single is set the value is first rounded to single precision,
 * then widened back to the double format (fcfids/fcfidus semantics).
 */
#define FPU_FCFI(op, cvtr, is_single)                      \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg)       \
{                                                          \
    CPU_DoubleU farg;                                      \
                                                           \
    if (is_single) {                                       \
        float32 tmp = cvtr(arg, &env->fp_status);          \
        farg.d = float32_to_float64(tmp, &env->fp_status); \
    } else {                                               \
        farg.d = cvtr(arg, &env->fp_status);               \
    }                                                      \
    float_check_status(env);                               \
    return farg.ll;                                        \
}

FPU_FCFI(fcfid, int64_to_float64, 0)
FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
726 
/*
 * Common implementation of the fri* (round-to-integer) helpers.
 * Rounds @arg (a float64 image) to an integral float64 value using
 * @rounding_mode, then restores the rounding mode from FPSCR[RN].
 */
static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
                              int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        farg.ll = arg | 0x0008000000000000ULL;  /* quiet the sNaN */
    } else {
        /* Remember whether inexact was already pending before we round. */
        int inexact = get_float_exception_flags(&env->fp_status) &
                      float_flag_inexact;
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode(env);

        /* fri* does not set FPSCR[XX] */
        if (!inexact) {
            /* Only discard the flag if this rounding introduced it. */
            env->fp_status.float_exception_flags &= ~float_flag_inexact;
        }
    }
    float_check_status(env);
    return farg.ll;
}
754 
/* frin: round to nearest integral value, ties away from zero. */
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_ties_away);
}
759 
/* friz: round to integral value toward zero (truncate). */
uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_to_zero);
}
764 
/* frip: round to integral value toward +infinity (ceiling). */
uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_up);
}
769 
/* frim: round to integral value toward -infinity (floor). */
uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
{
    return do_fri(env, arg, float_round_down);
}
774 
/*
 * FPU_MADDSUB_UPDATE - generate <TP>_maddsub_update_excp(), which decodes
 * why a fused multiply-add raised float_flag_invalid and records the
 * precise FPSCR cause: sNaN operand (VXSNAN), inf * 0 product (VXIMZ),
 * or an effective inf - inf in the addend step (VXISI), taking the
 * float_muladd_negate_c flag into account for the addend's sign.
 */
#define FPU_MADDSUB_UPDATE(NAME, TP)                                    \
static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3,           \
                 unsigned int madd_flags)                               \
{                                                                       \
    if (TP##_is_signaling_nan(arg1, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg2, &env->fp_status) ||                 \
        TP##_is_signaling_nan(arg3, &env->fp_status)) {                 \
        /* sNaN operation */                                            \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);          \
    }                                                                   \
    if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) ||               \
        (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) {               \
        /* Multiplication of zero by infinity */                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);           \
    }                                                                   \
    if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) &&           \
        TP##_is_infinity(arg3)) {                                       \
        uint8_t aSign, bSign, cSign;                                    \
                                                                        \
        aSign = TP##_is_neg(arg1);                                      \
        bSign = TP##_is_neg(arg2);                                      \
        cSign = TP##_is_neg(arg3);                                      \
        if (madd_flags & float_muladd_negate_c) {                       \
            cSign ^= 1;                                                 \
        }                                                               \
        if (aSign ^ bSign ^ cSign) {                                    \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);       \
        }                                                               \
    }                                                                   \
}
FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
807 
/*
 * FPU_FMADD - expand one fused multiply-add helper.
 *   op         - helper name suffix (fmadd, fmsub, fnmadd, fnmsub)
 *   madd_flags - softfloat float_muladd_* negation flags for the variant
 * The muladd itself is done by softfloat; when it raises float_flag_invalid
 * the shared *_maddsub_update_excp routine decides which FPSCR invalid-op
 * bit (VXSNAN / VXIMZ / VXISI) applies.
 */
#define FPU_FMADD(op, madd_flags)                                       \
uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
                     uint64_t arg2, uint64_t arg3)                      \
{                                                                       \
    uint32_t flags;                                                     \
    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
                                 &env->fp_status);                      \
    flags = get_float_exception_flags(&env->fp_status);                 \
    if (flags) {                                                        \
        if (flags & float_flag_invalid) {                               \
            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
                                        madd_flags);                    \
        }                                                               \
        float_check_status(env);                                        \
    }                                                                   \
    return ret;                                                         \
}
825 
/* softfloat muladd flag combinations for the four fused variants. */
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)

/* fmadd/fnmadd/fmsub/fnmsub (and their dot forms) */
FPU_FMADD(fmadd, MADD_FLGS)
FPU_FMADD(fnmadd, NMADD_FLGS)
FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
835 
/* frsp - frsp.  Round a double-precision value to single precision
 * (result is still returned in double-precision format).  */
uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
        /* sNaN round to single precision */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
    /* Narrow to single and widen back so the result is SP-representable. */
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
853 
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
        /* qNaN input passes through unchanged. */
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        /* The exception routine supplies the result (default QNaN). */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
875 
876 /* fre - fre. */
877 uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
878 {
879     CPU_DoubleU farg;
880 
881     farg.ll = arg;
882 
883     if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
884         /* sNaN reciprocal */
885         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
886     }
887     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
888     return farg.d;
889 }
890 
891 /* fres - fres. */
892 uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
893 {
894     CPU_DoubleU farg;
895     float32 f32;
896 
897     farg.ll = arg;
898 
899     if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
900         /* sNaN reciprocal */
901         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
902     }
903     farg.d = float64_div(float64_one, farg.d, &env->fp_status);
904     f32 = float64_to_float32(farg.d, &env->fp_status);
905     farg.d = float32_to_float64(f32, &env->fp_status);
906 
907     return farg.ll;
908 }
909 
/* frsqrte  - frsqrte.  Reciprocal square-root estimate, computed here as
 * an exact 1.0 / sqrt(x).  */
uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_any_nan(farg.d))) {
        if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
            /* sNaN reciprocal square root */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
            farg.ll = float64_snan_to_qnan(farg.ll);
        }
        /* qNaN input passes through unchanged. */
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        /* The exception routine supplies the result (default QNaN). */
        farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    }

    return farg.ll;
}
933 
934 /* fsel - fsel. */
935 uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
936                      uint64_t arg3)
937 {
938     CPU_DoubleU farg1;
939 
940     farg1.ll = arg1;
941 
942     if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
943         !float64_is_any_nan(farg1.d)) {
944         return arg2;
945     } else {
946         return arg3;
947     }
948 }
949 
/* ftdiv - FP test for software divide.  Computes the fe (0x2) and fg (0x4)
 * flags from the two operands; bit 0x8 is always set in the returned
 * 4-bit CR image.  */
uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
{
    int fe_flag = 0;
    int fg_flag = 0;

    /* Infinite operands or a zero divisor always need special handling. */
    if (unlikely(float64_is_infinity(fra) ||
                 float64_is_infinity(frb) ||
                 float64_is_zero(frb))) {
        fe_flag = 1;
        fg_flag = 1;
    } else {
        int e_a = ppc_float64_get_unbiased_exp(fra);
        int e_b = ppc_float64_get_unbiased_exp(frb);

        if (unlikely(float64_is_any_nan(fra) ||
                     float64_is_any_nan(frb))) {
            fe_flag = 1;
        } else if ((e_b <= -1022) || (e_b >= 1021)) {
            /* Divisor exponent near the limits of the double format. */
            fe_flag = 1;
        } else if (!float64_is_zero(fra) &&
                   (((e_a - e_b) >= 1023) ||
                    ((e_a - e_b) <= -1021) ||
                    (e_a <= -970))) {
            /* Quotient exponent could leave the representable range. */
            fe_flag = 1;
        }

        if (unlikely(float64_is_zero_or_denormal(frb))) {
            /* XB is not zero because of the above check and */
            /* so must be denormalized.                      */
            fg_flag = 1;
        }
    }

    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}
985 
986 uint32_t helper_ftsqrt(uint64_t frb)
987 {
988     int fe_flag = 0;
989     int fg_flag = 0;
990 
991     if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
992         fe_flag = 1;
993         fg_flag = 1;
994     } else {
995         int e_b = ppc_float64_get_unbiased_exp(frb);
996 
997         if (unlikely(float64_is_any_nan(frb))) {
998             fe_flag = 1;
999         } else if (unlikely(float64_is_zero(frb))) {
1000             fe_flag = 1;
1001         } else if (unlikely(float64_is_neg(frb))) {
1002             fe_flag = 1;
1003         } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
1004             fe_flag = 1;
1005         }
1006 
1007         if (unlikely(float64_is_zero_or_denormal(frb))) {
1008             /* XB is not zero because of the above check and */
1009             /* therefore must be denormalized.               */
1010             fg_flag = 1;
1011         }
1012     }
1013 
1014     return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
1015 }
1016 
/* fcmpu - unordered FP compare: sets the target CR field and FPSCR[FPRF]. */
void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    /* 4-bit result: 0x8 = LT, 0x4 = GT, 0x2 = EQ, 0x1 = unordered (NaN). */
    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    /* Mirror the result into FPSCR[FPRF] and the requested CR field. */
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    /* fcmpu raises only VXSNAN (no VXVC) for a signaling-NaN operand. */
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
                     float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
        /* sNaN comparison */
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
    }
}
1047 
/* fcmpo - ordered FP compare: like fcmpu, but any NaN operand also raises
 * VXVC (invalid compare).  */
void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
                  uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    /* 4-bit result: 0x8 = LT, 0x4 = GT, 0x2 = EQ, 0x1 = unordered (NaN). */
    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    /* Mirror the result into FPSCR[FPRF] and the requested CR field. */
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
            float64_is_signaling_nan(farg2.d, &env->fp_status)) {
            /* sNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC, 1);
        } else {
            /* qNaN comparison */
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
        }
    }
}
1083 
1084 /* Single-precision floating-point conversions */
1085 static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
1086 {
1087     CPU_FloatU u;
1088 
1089     u.f = int32_to_float32(val, &env->vec_status);
1090 
1091     return u.l;
1092 }
1093 
1094 static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
1095 {
1096     CPU_FloatU u;
1097 
1098     u.f = uint32_to_float32(val, &env->vec_status);
1099 
1100     return u.l;
1101 }
1102 
1103 static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
1104 {
1105     CPU_FloatU u;
1106 
1107     u.l = val;
1108     /* NaN are not treated the same way IEEE 754 does */
1109     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1110         return 0;
1111     }
1112 
1113     return float32_to_int32(u.f, &env->vec_status);
1114 }
1115 
1116 static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
1117 {
1118     CPU_FloatU u;
1119 
1120     u.l = val;
1121     /* NaN are not treated the same way IEEE 754 does */
1122     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1123         return 0;
1124     }
1125 
1126     return float32_to_uint32(u.f, &env->vec_status);
1127 }
1128 
1129 static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
1130 {
1131     CPU_FloatU u;
1132 
1133     u.l = val;
1134     /* NaN are not treated the same way IEEE 754 does */
1135     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1136         return 0;
1137     }
1138 
1139     return float32_to_int32_round_to_zero(u.f, &env->vec_status);
1140 }
1141 
1142 static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
1143 {
1144     CPU_FloatU u;
1145 
1146     u.l = val;
1147     /* NaN are not treated the same way IEEE 754 does */
1148     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1149         return 0;
1150     }
1151 
1152     return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
1153 }
1154 
1155 static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
1156 {
1157     CPU_FloatU u;
1158     float32 tmp;
1159 
1160     u.f = int32_to_float32(val, &env->vec_status);
1161     tmp = int64_to_float32(1ULL << 32, &env->vec_status);
1162     u.f = float32_div(u.f, tmp, &env->vec_status);
1163 
1164     return u.l;
1165 }
1166 
1167 static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
1168 {
1169     CPU_FloatU u;
1170     float32 tmp;
1171 
1172     u.f = uint32_to_float32(val, &env->vec_status);
1173     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1174     u.f = float32_div(u.f, tmp, &env->vec_status);
1175 
1176     return u.l;
1177 }
1178 
1179 static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
1180 {
1181     CPU_FloatU u;
1182     float32 tmp;
1183 
1184     u.l = val;
1185     /* NaN are not treated the same way IEEE 754 does */
1186     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1187         return 0;
1188     }
1189     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1190     u.f = float32_mul(u.f, tmp, &env->vec_status);
1191 
1192     return float32_to_int32(u.f, &env->vec_status);
1193 }
1194 
1195 static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
1196 {
1197     CPU_FloatU u;
1198     float32 tmp;
1199 
1200     u.l = val;
1201     /* NaN are not treated the same way IEEE 754 does */
1202     if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
1203         return 0;
1204     }
1205     tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
1206     u.f = float32_mul(u.f, tmp, &env->vec_status);
1207 
1208     return float32_to_uint32(u.f, &env->vec_status);
1209 }
1210 
/* Expand a public scalar SPE conversion helper that forwards to the
 * static e<name> implementation above.  */
#define HELPER_SPE_SINGLE_CONV(name)                              \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t val)       \
    {                                                             \
        return e##name(env, val);                                 \
    }
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
1236 
/* Expand a vector SPE conversion helper: applies the scalar conversion
 * independently to the high and low 32-bit halves of the 64-bit operand
 * (the low-half call relies on implicit truncation of val).  */
#define HELPER_SPE_VECTOR_CONV(name)                            \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t val)    \
    {                                                           \
        return ((uint64_t)e##name(env, val >> 32) << 32) |      \
            (uint64_t)e##name(env, val);                        \
    }
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
1263 
1264 /* Single-precision floating-point arithmetic */
1265 static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
1266 {
1267     CPU_FloatU u1, u2;
1268 
1269     u1.l = op1;
1270     u2.l = op2;
1271     u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1272     return u1.l;
1273 }
1274 
1275 static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
1276 {
1277     CPU_FloatU u1, u2;
1278 
1279     u1.l = op1;
1280     u2.l = op2;
1281     u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1282     return u1.l;
1283 }
1284 
1285 static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
1286 {
1287     CPU_FloatU u1, u2;
1288 
1289     u1.l = op1;
1290     u2.l = op2;
1291     u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1292     return u1.l;
1293 }
1294 
1295 static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
1296 {
1297     CPU_FloatU u1, u2;
1298 
1299     u1.l = op1;
1300     u2.l = op2;
1301     u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1302     return u1.l;
1303 }
1304 
/* Expand a public scalar SPE arithmetic helper that forwards to the
 * static e<name> implementation above.  */
#define HELPER_SPE_SINGLE_ARITH(name)                                   \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
1318 
/* Expand a vector SPE arithmetic helper: the scalar operation is applied
 * independently to the high and low 32-bit halves of each operand.  */
#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) |   \
            (uint64_t)e##name(env, op1, op2);                           \
    }
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
1333 
1334 /* Single-precision floating-point comparisons */
1335 static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1336 {
1337     CPU_FloatU u1, u2;
1338 
1339     u1.l = op1;
1340     u2.l = op2;
1341     return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1342 }
1343 
1344 static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
1345 {
1346     CPU_FloatU u1, u2;
1347 
1348     u1.l = op1;
1349     u2.l = op2;
1350     return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
1351 }
1352 
1353 static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
1354 {
1355     CPU_FloatU u1, u2;
1356 
1357     u1.l = op1;
1358     u2.l = op2;
1359     return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
1360 }
1361 
/* efststlt - "test" variant of efscmplt; currently identical (see XXX). */
static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmplt(env, op1, op2);
}
1367 
/* efststgt - "test" variant of efscmpgt; currently identical (see XXX). */
static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpgt(env, op1, op2);
}
1373 
/* efststeq - "test" variant of efscmpeq; currently identical (see XXX). */
static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinites, ...) */
    return efscmpeq(env, op1, op2);
}
1379 
/* Expand a public scalar SPE compare helper that forwards to the static
 * e<name> implementation above.  */
#define HELPER_SINGLE_SPE_CMP(name)                                     \
    uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
    {                                                                   \
        return e##name(env, op1, op2);                                  \
    }
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);
1397 
1398 static inline uint32_t evcmp_merge(int t0, int t1)
1399 {
1400     return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1401 }
1402 
/* Expand a vector SPE compare helper: compare each 32-bit half and merge
 * the two results with evcmp_merge.  */
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
    {                                                                   \
        return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32),          \
                           e##name(env, op1, op2));                     \
    }
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
1421 
1422 /* Double-precision floating-point conversion */
1423 uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
1424 {
1425     CPU_DoubleU u;
1426 
1427     u.d = int32_to_float64(val, &env->vec_status);
1428 
1429     return u.ll;
1430 }
1431 
1432 uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
1433 {
1434     CPU_DoubleU u;
1435 
1436     u.d = int64_to_float64(val, &env->vec_status);
1437 
1438     return u.ll;
1439 }
1440 
1441 uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
1442 {
1443     CPU_DoubleU u;
1444 
1445     u.d = uint32_to_float64(val, &env->vec_status);
1446 
1447     return u.ll;
1448 }
1449 
1450 uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
1451 {
1452     CPU_DoubleU u;
1453 
1454     u.d = uint64_to_float64(val, &env->vec_status);
1455 
1456     return u.ll;
1457 }
1458 
1459 uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
1460 {
1461     CPU_DoubleU u;
1462 
1463     u.ll = val;
1464     /* NaN are not treated the same way IEEE 754 does */
1465     if (unlikely(float64_is_any_nan(u.d))) {
1466         return 0;
1467     }
1468 
1469     return float64_to_int32(u.d, &env->vec_status);
1470 }
1471 
1472 uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
1473 {
1474     CPU_DoubleU u;
1475 
1476     u.ll = val;
1477     /* NaN are not treated the same way IEEE 754 does */
1478     if (unlikely(float64_is_any_nan(u.d))) {
1479         return 0;
1480     }
1481 
1482     return float64_to_uint32(u.d, &env->vec_status);
1483 }
1484 
1485 uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
1486 {
1487     CPU_DoubleU u;
1488 
1489     u.ll = val;
1490     /* NaN are not treated the same way IEEE 754 does */
1491     if (unlikely(float64_is_any_nan(u.d))) {
1492         return 0;
1493     }
1494 
1495     return float64_to_int32_round_to_zero(u.d, &env->vec_status);
1496 }
1497 
1498 uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
1499 {
1500     CPU_DoubleU u;
1501 
1502     u.ll = val;
1503     /* NaN are not treated the same way IEEE 754 does */
1504     if (unlikely(float64_is_any_nan(u.d))) {
1505         return 0;
1506     }
1507 
1508     return float64_to_int64_round_to_zero(u.d, &env->vec_status);
1509 }
1510 
1511 uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
1512 {
1513     CPU_DoubleU u;
1514 
1515     u.ll = val;
1516     /* NaN are not treated the same way IEEE 754 does */
1517     if (unlikely(float64_is_any_nan(u.d))) {
1518         return 0;
1519     }
1520 
1521     return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
1522 }
1523 
1524 uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
1525 {
1526     CPU_DoubleU u;
1527 
1528     u.ll = val;
1529     /* NaN are not treated the same way IEEE 754 does */
1530     if (unlikely(float64_is_any_nan(u.d))) {
1531         return 0;
1532     }
1533 
1534     return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
1535 }
1536 
1537 uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
1538 {
1539     CPU_DoubleU u;
1540     float64 tmp;
1541 
1542     u.d = int32_to_float64(val, &env->vec_status);
1543     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1544     u.d = float64_div(u.d, tmp, &env->vec_status);
1545 
1546     return u.ll;
1547 }
1548 
1549 uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
1550 {
1551     CPU_DoubleU u;
1552     float64 tmp;
1553 
1554     u.d = uint32_to_float64(val, &env->vec_status);
1555     tmp = int64_to_float64(1ULL << 32, &env->vec_status);
1556     u.d = float64_div(u.d, tmp, &env->vec_status);
1557 
1558     return u.ll;
1559 }
1560 
1561 uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
1562 {
1563     CPU_DoubleU u;
1564     float64 tmp;
1565 
1566     u.ll = val;
1567     /* NaN are not treated the same way IEEE 754 does */
1568     if (unlikely(float64_is_any_nan(u.d))) {
1569         return 0;
1570     }
1571     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1572     u.d = float64_mul(u.d, tmp, &env->vec_status);
1573 
1574     return float64_to_int32(u.d, &env->vec_status);
1575 }
1576 
1577 uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
1578 {
1579     CPU_DoubleU u;
1580     float64 tmp;
1581 
1582     u.ll = val;
1583     /* NaN are not treated the same way IEEE 754 does */
1584     if (unlikely(float64_is_any_nan(u.d))) {
1585         return 0;
1586     }
1587     tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
1588     u.d = float64_mul(u.d, tmp, &env->vec_status);
1589 
1590     return float64_to_uint32(u.d, &env->vec_status);
1591 }
1592 
1593 uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
1594 {
1595     CPU_DoubleU u1;
1596     CPU_FloatU u2;
1597 
1598     u1.ll = val;
1599     u2.f = float64_to_float32(u1.d, &env->vec_status);
1600 
1601     return u2.l;
1602 }
1603 
1604 uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
1605 {
1606     CPU_DoubleU u2;
1607     CPU_FloatU u1;
1608 
1609     u1.l = val;
1610     u2.d = float32_to_float64(u1.f, &env->vec_status);
1611 
1612     return u2.ll;
1613 }
1614 
1615 /* Double precision fixed-point arithmetic */
1616 uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
1617 {
1618     CPU_DoubleU u1, u2;
1619 
1620     u1.ll = op1;
1621     u2.ll = op2;
1622     u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1623     return u1.ll;
1624 }
1625 
1626 uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
1627 {
1628     CPU_DoubleU u1, u2;
1629 
1630     u1.ll = op1;
1631     u2.ll = op2;
1632     u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1633     return u1.ll;
1634 }
1635 
1636 uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
1637 {
1638     CPU_DoubleU u1, u2;
1639 
1640     u1.ll = op1;
1641     u2.ll = op2;
1642     u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1643     return u1.ll;
1644 }
1645 
1646 uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
1647 {
1648     CPU_DoubleU u1, u2;
1649 
1650     u1.ll = op1;
1651     u2.ll = op2;
1652     u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1653     return u1.ll;
1654 }
1655 
1656 /* Double precision floating point helpers */
1657 uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1658 {
1659     CPU_DoubleU u1, u2;
1660 
1661     u1.ll = op1;
1662     u2.ll = op2;
1663     return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1664 }
1665 
1666 uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
1667 {
1668     CPU_DoubleU u1, u2;
1669 
1670     u1.ll = op1;
1671     u2.ll = op2;
1672     return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
1673 }
1674 
1675 uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
1676 {
1677     CPU_DoubleU u1, u2;
1678 
1679     u1.ll = op1;
1680     u2.ll = op2;
1681     return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
1682 }
1683 
/* efdcmplt - double-precision compare; currently forwards to efdtstlt. */
uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(env, op1, op2);
}
1689 
/* efdcmpgt - double-precision compare; currently forwards to efdtstgt. */
uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(env, op1, op2);
}
1695 
/* efdcmpeq - double-precision compare; currently forwards to efdtsteq. */
uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(env, op1, op2);
}
1701 
/* Identity "conversion" — presumably lets the VSX helper macros apply a
 * uniform tp##_to_float64 step to float64 elements; confirm against the
 * macros that expand it.  */
#define float64_to_float64(x, env) x

1704 
/* VSX_ADD_SUB - VSX floating point add/subtract
 *   name  - instruction mnemonic
 *   op    - operation (add or sub)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 * Each element is computed on a scratch float_status so the exception
 * flags can be inspected per element before being merged back.
 */
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}
1750 
/* Scalar (xs*) and vector (xv*) add/sub instantiations for DP and SP. */
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
1759 
/* xsaddqp[o] - VSX scalar quad-precision add; the opcode's Rc bit selects
 * the round-to-odd variant (xsaddqpo).  */
void helper_xsaddqp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xt, xa, xb;
    float_status tstat;

    getVSR(rA(opcode) + 32, &xa, env);
    getVSR(rB(opcode) + 32, &xb, env);
    getVSR(rD(opcode) + 32, &xt, env);
    helper_reset_fpstatus(env);

    /* Use a scratch status so the rounding-mode override stays local. */
    tstat = env->fp_status;
    if (unlikely(Rc(opcode) != 0)) {
        tstat.float_rounding_mode = float_round_to_odd;
    }

    set_float_exception_flags(0, &tstat);
    xt.f128 = float128_add(xa.f128, xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        /* Two infinite operands -> VXISI; otherwise a signaling NaN. */
        if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
        } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
                   float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);

    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
1793 
/* VSX_MUL - VSX floating point multiply
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 */
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xa, xb;                                                    \
    int i;                                                                   \
                                                                             \
    getVSR(xA(opcode), &xa, env);                                            \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Per-element scratch status; flags are merged back below. */       \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* Distinguish inf * 0 (VXIMZ) from SNaN operands (VXSNAN). */   \
            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||              \
                       tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

/* Scalar forms set FPRF; xsmulsp additionally rounds to single. */
VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
1845 
1846 void helper_xsmulqp(CPUPPCState *env, uint32_t opcode)
1847 {
1848     ppc_vsr_t xt, xa, xb;
1849     float_status tstat;
1850 
1851     getVSR(rA(opcode) + 32, &xa, env);
1852     getVSR(rB(opcode) + 32, &xb, env);
1853     getVSR(rD(opcode) + 32, &xt, env);
1854 
1855     helper_reset_fpstatus(env);
1856     tstat = env->fp_status;
1857     if (unlikely(Rc(opcode) != 0)) {
1858         tstat.float_rounding_mode = float_round_to_odd;
1859     }
1860 
1861     set_float_exception_flags(0, &tstat);
1862     xt.f128 = float128_mul(xa.f128, xb.f128, &tstat);
1863     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1864 
1865     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1866         if ((float128_is_infinity(xa.f128) && float128_is_zero(xb.f128)) ||
1867             (float128_is_infinity(xb.f128) && float128_is_zero(xa.f128))) {
1868             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
1869         } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1870                    float128_is_signaling_nan(xb.f128, &tstat)) {
1871             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1872         }
1873     }
1874     helper_compute_fprf_float128(env, xt.f128);
1875 
1876     putVSR(rD(opcode) + 32, &xt, env);
1877     float_check_status(env);
1878 }
1879 
/* VSX_DIV - VSX floating point divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 */
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* Per-element scratch status; flags are merged back below. */        \
        float_status tstat = env->fp_status;                                  \
        set_float_exception_flags(0, &tstat);                                 \
        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                              \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
            /* inf/inf -> VXIDI, 0/0 -> VXZDZ, SNaN operand -> VXSNAN. */     \
            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
            } else if (tp##_is_zero(xa.fld) &&                                \
                tp##_is_zero(xb.fld)) {                                       \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
            } else if (tp##_is_signaling_nan(xa.fld, &tstat) ||               \
                tp##_is_signaling_nan(xb.fld, &tstat)) {                      \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
            }                                                                 \
        }                                                                     \
                                                                              \
        if (r2sp) {                                                           \
            xt.fld = helper_frsp(env, xt.fld);                                \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt.fld);                         \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

/* Scalar forms set FPRF; xsdivsp additionally rounds to single. */
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
1933 
1934 void helper_xsdivqp(CPUPPCState *env, uint32_t opcode)
1935 {
1936     ppc_vsr_t xt, xa, xb;
1937     float_status tstat;
1938 
1939     getVSR(rA(opcode) + 32, &xa, env);
1940     getVSR(rB(opcode) + 32, &xb, env);
1941     getVSR(rD(opcode) + 32, &xt, env);
1942 
1943     helper_reset_fpstatus(env);
1944     tstat = env->fp_status;
1945     if (unlikely(Rc(opcode) != 0)) {
1946         tstat.float_rounding_mode = float_round_to_odd;
1947     }
1948 
1949     set_float_exception_flags(0, &tstat);
1950     xt.f128 = float128_div(xa.f128, xb.f128, &tstat);
1951     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
1952 
1953     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
1954         if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
1955             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
1956         } else if (float128_is_zero(xa.f128) &&
1957             float128_is_zero(xb.f128)) {
1958             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
1959         } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
1960             float128_is_signaling_nan(xb.f128, &tstat)) {
1961             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
1962         }
1963     }
1964 
1965     helper_compute_fprf_float128(env, xt.f128);
1966     putVSR(rD(opcode) + 32, &xt, env);
1967     float_check_status(env);
1968 }
1969 
/* VSX_RE  - VSX floating point reciprocal estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * The "estimate" is implemented here as a correctly rounded 1/x divide,
 * which satisfies the architecture's accuracy requirement for estimates.
 */
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xb;                                                         \
    int i;                                                                    \
                                                                              \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
    helper_reset_fpstatus(env);                                               \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        /* SNaN input raises VXSNAN before the divide quiets it. */           \
        if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
        }                                                                     \
        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
                                                                              \
        if (r2sp) {                                                           \
            xt.fld = helper_frsp(env, xt.fld);                                \
        }                                                                     \
                                                                              \
        if (sfprf) {                                                          \
            helper_compute_fprf_float64(env, xt.fld);                         \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
2010 
/* VSX_SQRT - VSX floating point square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 */
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Per-element scratch status; flags are merged back below. */       \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* sqrt of a negative non-zero -> VXSQRT, SNaN -> VXSNAN.    */  \
            /* sqrt(-0) is valid, hence the is_zero exclusion.           */  \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
2059 
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   sfprf - set FPRF
 *   r2sp  - round the double-precision result to single precision
 *
 * Implemented as a full-precision sqrt followed by a 1/x divide rather
 * than a table-based estimate.
 */
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
    helper_reset_fpstatus(env);                                              \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        /* Per-element scratch status; flags are merged back below. */       \
        float_status tstat = env->fp_status;                                 \
        set_float_exception_flags(0, &tstat);                                \
        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
        env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                             \
        if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
            /* Negative non-zero input -> VXSQRT, SNaN -> VXSNAN. */         \
            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
            } else if (tp##_is_signaling_nan(xb.fld, &tstat)) {              \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
            }                                                                \
        }                                                                    \
                                                                             \
        if (r2sp) {                                                          \
            xt.fld = helper_frsp(env, xt.fld);                               \
        }                                                                    \
                                                                             \
        if (sfprf) {                                                         \
            helper_compute_fprf_float64(env, xt.fld);                        \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
2109 
/* VSX_TDIV - VSX floating point test for divide
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   emax  - maximum unbiased exponent
 *   nbits - number of fraction bits
 *
 * Sets CR[BF] = 0b1000 | fg<<2 | fe<<1: fe when a software-assisted
 * divide would be needed (NaN/inf operands or exponent-range hazards),
 * fg when the divisor is infinite, zero or denormal.
 */
#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits)                  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
                     tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
                         tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xa.fld) &&                         \
                       (((e_a - e_b) >= emax) ||                        \
                        ((e_a - e_b) <= (emin+1)) ||                    \
                         (e_a <= (emin+nbits)))) {                      \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* so must be denormalized.                      */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
2166 
/* VSX_TSQRT - VSX floating point test for square root
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   emin  - minimum unbiased exponent
 *   nbits - number of fraction bits
 *
 * (Unlike VSX_TDIV this macro takes no emax parameter.)
 * Sets CR[BF] = 0b1000 | fg<<2 | fe<<1, as for VSX_TDIV.
 */
#define VSX_TSQRT(op, nels, tp, fld, emin, nbits)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    int i;                                                              \
    int fe_flag = 0;                                                    \
    int fg_flag = 0;                                                    \
                                                                        \
    getVSR(xA(opcode), &xa, env);                                       \
    getVSR(xB(opcode), &xb, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
                     tp##_is_zero(xb.fld))) {                           \
            fe_flag = 1;                                                \
            fg_flag = 1;                                                \
        } else {                                                        \
            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                        \
            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
                fe_flag = 1;                                            \
            /* NOTE(review): XB cannot be zero here (handled above), */ \
            /* so this check and the !is_zero below are redundant.  */  \
            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
                fe_flag = 1;                                            \
            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
                fe_flag = 1;                                            \
            } else if (!tp##_is_zero(xb.fld) &&                         \
                      (e_b <= (emin+nbits))) {                          \
                fe_flag = 1;                                            \
            }                                                           \
                                                                        \
            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                /* XB is not zero because of the above check and */     \
                /* therefore must be denormalized.               */     \
                fg_flag = 1;                                            \
            }                                                           \
        }                                                               \
    }                                                                   \
                                                                        \
    env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}

VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
2220 
2221 /* VSX_MADD - VSX floating point muliply/add variations
2222  *   op    - instruction mnemonic
2223  *   nels  - number of elements (1, 2 or 4)
2224  *   tp    - type (float32 or float64)
2225  *   fld   - vsr_t field (VsrD(*) or VsrW(*))
2226  *   maddflgs - flags for the float*muladd routine that control the
2227  *           various forms (madd, msub, nmadd, nmsub)
2228  *   afrm  - A form (1=A, 0=M)
2229  *   sfprf - set FPRF
2230  */
2231 #define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp)              \
2232 void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
2233 {                                                                             \
2234     ppc_vsr_t xt_in, xa, xb, xt_out;                                          \
2235     ppc_vsr_t *b, *c;                                                         \
2236     int i;                                                                    \
2237                                                                               \
2238     if (afrm) { /* AxB + T */                                                 \
2239         b = &xb;                                                              \
2240         c = &xt_in;                                                           \
2241     } else { /* AxT + B */                                                    \
2242         b = &xt_in;                                                           \
2243         c = &xb;                                                              \
2244     }                                                                         \
2245                                                                               \
2246     getVSR(xA(opcode), &xa, env);                                             \
2247     getVSR(xB(opcode), &xb, env);                                             \
2248     getVSR(xT(opcode), &xt_in, env);                                          \
2249                                                                               \
2250     xt_out = xt_in;                                                           \
2251                                                                               \
2252     helper_reset_fpstatus(env);                                               \
2253                                                                               \
2254     for (i = 0; i < nels; i++) {                                              \
2255         float_status tstat = env->fp_status;                                  \
2256         set_float_exception_flags(0, &tstat);                                 \
2257         if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
2258             /* Avoid double rounding errors by rounding the intermediate */   \
2259             /* result to odd.                                            */   \
2260             set_float_rounding_mode(float_round_to_zero, &tstat);             \
2261             xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2262                                        maddflgs, &tstat);                     \
2263             xt_out.fld |= (get_float_exception_flags(&tstat) &                \
2264                               float_flag_inexact) != 0;                       \
2265         } else {                                                              \
2266             xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
2267                                         maddflgs, &tstat);                    \
2268         }                                                                     \
2269         env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
2270                                                                               \
2271         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
2272             tp##_maddsub_update_excp(env, xa.fld, b->fld, c->fld, maddflgs);  \
2273         }                                                                     \
2274                                                                               \
2275         if (r2sp) {                                                           \
2276             xt_out.fld = helper_frsp(env, xt_out.fld);                        \
2277         }                                                                     \
2278                                                                               \
2279         if (sfprf) {                                                          \
2280             helper_compute_fprf_float64(env, xt_out.fld);                     \
2281         }                                                                     \
2282     }                                                                         \
2283     putVSR(xT(opcode), &xt_out, env);                                         \
2284     float_check_status(env);                                                  \
2285 }
2286 
/* Scalar double-precision fused multiply-add/subtract: one float64
 * element, FPRF is set (sfprf=1), full double-precision result (r2sp=0). */
VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)

/* Scalar single-precision forms: computed in float64, with the
 * intermediate rounded to odd to avoid double rounding and the final
 * result rounded to single precision via helper_frsp (r2sp=1);
 * FPRF is set. */
VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)

/* Vector double-precision forms: two float64 elements, FPRF not set. */
VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)

/* Vector single-precision forms: four float32 elements, FPRF not set. */
VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
2322 
/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
 *   op    - instruction mnemonic
 *   cmp   - softfloat comparison operation (eq, le, lt)
 *   exp   - expected result of comparison (predicate matches when the
 *           softfloat comparison returns exp)
 *   svxvc - set FPSCR[VXVC] on NaN operands (ordered-compare semantics)
 *
 * Writes all-ones to xT.VsrD(0) when the predicate holds and all-zeroes
 * otherwise; xT.VsrD(1) is always cleared.  If an invalid-operation
 * exception is enabled (fpscr_ve) and raised, xt keeps the value read
 * from the target register, so the target is effectively unmodified.
 * The operands are passed to the softfloat comparison as (xb, xa) so
 * that "ge"/"gt" predicates can be expressed with le/lt.
 */
#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false;            \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||              \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {              \
        vxsnan_flag = true;                                                   \
        if (fpscr_ve == 0 && svxvc) {                                         \
            vxvc_flag = true;                                                 \
        }                                                                     \
    } else if (svxvc) {                                                       \
        vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
            float64_is_quiet_nan(xb.VsrD(0), &env->fp_status);                \
    }                                                                         \
    if (vxsnan_flag) {                                                        \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);                \
    }                                                                         \
    if (vxvc_flag) {                                                          \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);                  \
    }                                                                         \
    vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag);                        \
                                                                              \
    if (!vex_flag) {                                                          \
        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
            xt.VsrD(0) = -1;                                                  \
            xt.VsrD(1) = 0;                                                   \
        } else {                                                              \
            xt.VsrD(0) = 0;                                                   \
            xt.VsrD(1) = 0;                                                   \
        }                                                                     \
    }                                                                         \
    putVSR(xT(opcode), &xt, env);                                             \
    helper_float_check_status(env);                                           \
}

VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
2374 
2375 void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode)
2376 {
2377     ppc_vsr_t xa, xb;
2378     int64_t exp_a, exp_b;
2379     uint32_t cc;
2380 
2381     getVSR(xA(opcode), &xa, env);
2382     getVSR(xB(opcode), &xb, env);
2383 
2384     exp_a = extract64(xa.VsrD(0), 52, 11);
2385     exp_b = extract64(xb.VsrD(0), 52, 11);
2386 
2387     if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||
2388                  float64_is_any_nan(xb.VsrD(0)))) {
2389         cc = CRF_SO;
2390     } else {
2391         if (exp_a < exp_b) {
2392             cc = CRF_LT;
2393         } else if (exp_a > exp_b) {
2394             cc = CRF_GT;
2395         } else {
2396             cc = CRF_EQ;
2397         }
2398     }
2399 
2400     env->fpscr &= ~(0x0F << FPSCR_FPRF);
2401     env->fpscr |= cc << FPSCR_FPRF;
2402     env->crf[BF(opcode)] = cc;
2403 
2404     helper_float_check_status(env);
2405 }
2406 
2407 void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode)
2408 {
2409     ppc_vsr_t xa, xb;
2410     int64_t exp_a, exp_b;
2411     uint32_t cc;
2412 
2413     getVSR(rA(opcode) + 32, &xa, env);
2414     getVSR(rB(opcode) + 32, &xb, env);
2415 
2416     exp_a = extract64(xa.VsrD(0), 48, 15);
2417     exp_b = extract64(xb.VsrD(0), 48, 15);
2418 
2419     if (unlikely(float128_is_any_nan(xa.f128) ||
2420                  float128_is_any_nan(xb.f128))) {
2421         cc = CRF_SO;
2422     } else {
2423         if (exp_a < exp_b) {
2424             cc = CRF_LT;
2425         } else if (exp_a > exp_b) {
2426             cc = CRF_GT;
2427         } else {
2428             cc = CRF_EQ;
2429         }
2430     }
2431 
2432     env->fpscr &= ~(0x0F << FPSCR_FPRF);
2433     env->fpscr |= cc << FPSCR_FPRF;
2434     env->crf[BF(opcode)] = cc;
2435 
2436     helper_float_check_status(env);
2437 }
2438 
/* VSX_SCALAR_CMP - VSX scalar floating point ordered/unordered compare
 *   op      - instruction mnemonic
 *   ordered - 1 raises FPSCR[VXVC] on NaN operands (ordered compare),
 *             0 raises VXVC only never (unordered compare)
 *
 * Compares the double-precision doublewords of xA and xB and records
 * the result (LT/GT/EQ, or SO when either operand is a NaN) in both
 * FPSCR[FPCC] and the CR field selected by BF.  An SNaN operand always
 * raises FPSCR[VXSNAN]; for the ordered form a quiet NaN (or an SNaN
 * with VE clear) additionally raises VXVC.
 */
#define VSX_SCALAR_CMP(op, ordered)                                      \
void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
{                                                                        \
    ppc_vsr_t xa, xb;                                                    \
    uint32_t cc = 0;                                                     \
    bool vxsnan_flag = false, vxvc_flag = false;                         \
                                                                         \
    helper_reset_fpstatus(env);                                          \
    getVSR(xA(opcode), &xa, env);                                        \
    getVSR(xB(opcode), &xb, env);                                        \
                                                                         \
    if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||         \
        float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {         \
        vxsnan_flag = true;                                              \
        cc = CRF_SO;                                                     \
        if (fpscr_ve == 0 && ordered) {                                  \
            vxvc_flag = true;                                            \
        }                                                                \
    } else if (float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) ||      \
               float64_is_quiet_nan(xb.VsrD(0), &env->fp_status)) {      \
        cc = CRF_SO;                                                     \
        if (ordered) {                                                   \
            vxvc_flag = true;                                            \
        }                                                                \
    }                                                                    \
    if (vxsnan_flag) {                                                   \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
    }                                                                    \
    if (vxvc_flag) {                                                     \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);             \
    }                                                                    \
                                                                         \
    if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {           \
        cc |= CRF_LT;                                                    \
    } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {   \
        cc |= CRF_GT;                                                    \
    } else {                                                             \
        cc |= CRF_EQ;                                                    \
    }                                                                    \
                                                                         \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                 \
    env->fpscr |= cc << FPSCR_FPRF;                                      \
    env->crf[BF(opcode)] = cc;                                           \
                                                                         \
    float_check_status(env);                                             \
}

VSX_SCALAR_CMP(xscmpodp, 1)
VSX_SCALAR_CMP(xscmpudp, 0)
2488 
/* VSX_SCALAR_CMPQ - VSX scalar quad-precision ordered/unordered compare
 *   op      - instruction mnemonic
 *   ordered - 1 raises FPSCR[VXVC] on NaN operands (ordered compare)
 *
 * Quad-precision counterpart of VSX_SCALAR_CMP: operands live in
 * VSRs 32-63 (addressed as rA/rB + 32).  The result (LT/GT/EQ, or SO
 * when either operand is a NaN) is recorded in both FPSCR[FPCC] and
 * the CR field selected by BF.
 */
#define VSX_SCALAR_CMPQ(op, ordered)                                    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xa, xb;                                                   \
    uint32_t cc = 0;                                                    \
    bool vxsnan_flag = false, vxvc_flag = false;                        \
                                                                        \
    helper_reset_fpstatus(env);                                         \
    getVSR(rA(opcode) + 32, &xa, env);                                  \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
                                                                        \
    if (float128_is_signaling_nan(xa.f128, &env->fp_status) ||          \
        float128_is_signaling_nan(xb.f128, &env->fp_status)) {          \
        vxsnan_flag = true;                                             \
        cc = CRF_SO;                                                    \
        if (fpscr_ve == 0 && ordered) {                                 \
            vxvc_flag = true;                                           \
        }                                                               \
    } else if (float128_is_quiet_nan(xa.f128, &env->fp_status) ||       \
               float128_is_quiet_nan(xb.f128, &env->fp_status)) {       \
        cc = CRF_SO;                                                    \
        if (ordered) {                                                  \
            vxvc_flag = true;                                           \
        }                                                               \
    }                                                                   \
    if (vxsnan_flag) {                                                  \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);          \
    }                                                                   \
    if (vxvc_flag) {                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);            \
    }                                                                   \
                                                                        \
    if (float128_lt(xa.f128, xb.f128, &env->fp_status)) {               \
        cc |= CRF_LT;                                                   \
    } else if (!float128_le(xa.f128, xb.f128, &env->fp_status)) {       \
        cc |= CRF_GT;                                                   \
    } else {                                                            \
        cc |= CRF_EQ;                                                   \
    }                                                                   \
                                                                        \
    env->fpscr &= ~(0x0F << FPSCR_FPRF);                                \
    env->fpscr |= cc << FPSCR_FPRF;                                     \
    env->crf[BF(opcode)] = cc;                                          \
                                                                        \
    float_check_status(env);                                            \
}

VSX_SCALAR_CMPQ(xscmpoqp, 1)
VSX_SCALAR_CMPQ(xscmpuqp, 0)
2538 
/* VSX_MAX_MIN - VSX floating point maximum/minimum
 *   name  - instruction mnemonic
 *   op    - operation (softfloat maxnum or minnum)
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *
 * An SNaN in either input of an element raises FPSCR[VXSNAN]; the
 * check is done after the operation so the maxnum/minnum result is
 * still produced.  NOTE(review): softfloat's maxnum/minnum are
 * presumed to follow IEEE 754-2008 (a single quiet NaN input yields
 * the other operand) -- confirm against fpu/softfloat.
 */
#define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
{                                                                             \
    ppc_vsr_t xt, xa, xb;                                                     \
    int i;                                                                    \
                                                                              \
    getVSR(xA(opcode), &xa, env);                                             \
    getVSR(xB(opcode), &xb, env);                                             \
    getVSR(xT(opcode), &xt, env);                                             \
                                                                              \
    for (i = 0; i < nels; i++) {                                              \
        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
        if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) ||        \
                     tp##_is_signaling_nan(xb.fld, &env->fp_status))) {       \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
        }                                                                     \
    }                                                                         \
                                                                              \
    putVSR(xT(opcode), &xt, env);                                             \
    float_check_status(env);                                                  \
}

VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
2574 
2575 #define VSX_MAX_MINC(name, max)                                               \
2576 void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2577 {                                                                             \
2578     ppc_vsr_t xt, xa, xb;                                                     \
2579     bool vxsnan_flag = false, vex_flag = false;                               \
2580                                                                               \
2581     getVSR(rA(opcode) + 32, &xa, env);                                        \
2582     getVSR(rB(opcode) + 32, &xb, env);                                        \
2583     getVSR(rD(opcode) + 32, &xt, env);                                        \
2584                                                                               \
2585     if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
2586                  float64_is_any_nan(xb.VsrD(0)))) {                           \
2587         if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
2588             float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2589             vxsnan_flag = true;                                               \
2590         }                                                                     \
2591         xt.VsrD(0) = xb.VsrD(0);                                              \
2592     } else if ((max &&                                                        \
2593                !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2594                (!max &&                                                       \
2595                float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2596         xt.VsrD(0) = xa.VsrD(0);                                              \
2597     } else {                                                                  \
2598         xt.VsrD(0) = xb.VsrD(0);                                              \
2599     }                                                                         \
2600                                                                               \
2601     vex_flag = fpscr_ve & vxsnan_flag;                                        \
2602     if (vxsnan_flag) {                                                        \
2603             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
2604     }                                                                         \
2605     if (!vex_flag) {                                                          \
2606         putVSR(rD(opcode) + 32, &xt, env);                                    \
2607     }                                                                         \
2608 }                                                                             \
2609 
2610 VSX_MAX_MINC(xsmaxcdp, 1);
2611 VSX_MAX_MINC(xsmincdp, 0);
2612 
2613 #define VSX_MAX_MINJ(name, max)                                               \
2614 void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
2615 {                                                                             \
2616     ppc_vsr_t xt, xa, xb;                                                     \
2617     bool vxsnan_flag = false, vex_flag = false;                               \
2618                                                                               \
2619     getVSR(rA(opcode) + 32, &xa, env);                                        \
2620     getVSR(rB(opcode) + 32, &xb, env);                                        \
2621     getVSR(rD(opcode) + 32, &xt, env);                                        \
2622                                                                               \
2623     if (unlikely(float64_is_any_nan(xa.VsrD(0)))) {                           \
2624         if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status)) {          \
2625             vxsnan_flag = true;                                               \
2626         }                                                                     \
2627         xt.VsrD(0) = xa.VsrD(0);                                              \
2628     } else if (unlikely(float64_is_any_nan(xb.VsrD(0)))) {                    \
2629         if (float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
2630             vxsnan_flag = true;                                               \
2631         }                                                                     \
2632         xt.VsrD(0) = xb.VsrD(0);                                              \
2633     } else if (float64_is_zero(xa.VsrD(0)) && float64_is_zero(xb.VsrD(0))) {  \
2634         if (max) {                                                            \
2635             if (!float64_is_neg(xa.VsrD(0)) || !float64_is_neg(xb.VsrD(0))) { \
2636                 xt.VsrD(0) = 0ULL;                                            \
2637             } else {                                                          \
2638                 xt.VsrD(0) = 0x8000000000000000ULL;                           \
2639             }                                                                 \
2640         } else {                                                              \
2641             if (float64_is_neg(xa.VsrD(0)) || float64_is_neg(xb.VsrD(0))) {   \
2642                 xt.VsrD(0) = 0x8000000000000000ULL;                           \
2643             } else {                                                          \
2644                 xt.VsrD(0) = 0ULL;                                            \
2645             }                                                                 \
2646         }                                                                     \
2647     } else if ((max &&                                                        \
2648                !float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) ||       \
2649                (!max &&                                                       \
2650                float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status))) {        \
2651         xt.VsrD(0) = xa.VsrD(0);                                              \
2652     } else {                                                                  \
2653         xt.VsrD(0) = xb.VsrD(0);                                              \
2654     }                                                                         \
2655                                                                               \
2656     vex_flag = fpscr_ve & vxsnan_flag;                                        \
2657     if (vxsnan_flag) {                                                        \
2658             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
2659     }                                                                         \
2660     if (!vex_flag) {                                                          \
2661         putVSR(rD(opcode) + 32, &xt, env);                                    \
2662     }                                                                         \
2663 }                                                                             \
2664 
2665 VSX_MAX_MINJ(xsmaxjdp, 1);
2666 VSX_MAX_MINJ(xsminjdp, 0);
2667 
/* VSX_CMP - VSX floating point compare
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   cmp   - comparison operation
 *   svxvc - set VXVC bit
 *   exp   - expected result of comparison
 *
 * Each target element is written with an all-ones mask when the
 *  predicate holds and all-zeroes otherwise; NaN operands make the
 *  predicate false.  An SNaN raises FPSCR[VXSNAN], and svxvc forms
 *  additionally raise VXVC for any NaN operand.  When the Rc bit of
 *  the opcode (bit 21) is set, CR6 is written with the "all true"
 *  (0x8) and "all false" (0x2) summary bits.  The operands are passed
 *  to the softfloat comparison as (xb, xa) so that "ge"/"gt"
 *  predicates can be expressed with le/lt.
 */
#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp)                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                         \
    ppc_vsr_t xt, xa, xb;                                                 \
    int i;                                                                \
    int all_true = 1;                                                     \
    int all_false = 1;                                                    \
                                                                          \
    getVSR(xA(opcode), &xa, env);                                         \
    getVSR(xB(opcode), &xb, env);                                         \
    getVSR(xT(opcode), &xt, env);                                         \
                                                                          \
    for (i = 0; i < nels; i++) {                                          \
        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
                     tp##_is_any_nan(xb.fld))) {                          \
            if (tp##_is_signaling_nan(xa.fld, &env->fp_status) ||         \
                tp##_is_signaling_nan(xb.fld, &env->fp_status)) {         \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
            }                                                             \
            if (svxvc) {                                                  \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
            }                                                             \
            xt.fld = 0;                                                   \
            all_true = 0;                                                 \
        } else {                                                          \
            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) {     \
                xt.fld = -1;                                              \
                all_false = 0;                                            \
            } else {                                                      \
                xt.fld = 0;                                               \
                all_true = 0;                                             \
            }                                                             \
        }                                                                 \
    }                                                                     \
                                                                          \
    putVSR(xT(opcode), &xt, env);                                         \
    if ((opcode >> (31-21)) & 1) {                                        \
        env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0);       \
    }                                                                     \
    float_check_status(env);                                              \
 }

VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
2727 
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f32 or f64)
 *   sfprf - set FPRF (scalar forms only)
 *
 * Converts each element from stp to ttp.  A signaling-NaN source
 * raises FPSCR[VXSNAN] and the converted result is forced quiet
 * (ttp##_snan_to_qnan sets the target-format quiet bit).
 */
#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    getVSR(xT(opcode), &xt, env);                                  \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
2766 
/* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (float64 or float128)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field (f64 or f128)
 *   sfprf - set FPRF
 *
 * Quad-precision scalar form: operands live in the FPR-mapped VSRs,
 * addressed by the plain 5-bit rB/rD fields + 32.  Signalling NaN
 * inputs raise VXSNAN and the result is quieted.
 */
#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf)    \
void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(rB(opcode) + 32, &xb, env);                                  \
    getVSR(rD(opcode) + 32, &xt, env);                                  \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,                    \
                                            &env->fp_status))) {        \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);      \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                      \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_##ttp(env, xt.tfld);                    \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(rD(opcode) + 32, &xt, env);                                  \
    float_check_status(env);                                            \
}

VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2802 
/* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion
 *                       involving one half precision value
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type
 *   ttp   - target type
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *
 * Unwritten elements of the target are zeroed (memset below).  The
 * extra '1' passed to the conversion routine is presumably the
 * softfloat "ieee" flag selecting IEEE (rather than ARM-alternative)
 * half-precision format -- confirm against the float16 conversion
 * prototypes in fpu/softfloat.h.
 */
#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
void helper_##op(CPUPPCState *env, uint32_t opcode)                \
{                                                                  \
    ppc_vsr_t xt, xb;                                              \
    int i;                                                         \
                                                                   \
    getVSR(xB(opcode), &xb, env);                                  \
    memset(&xt, 0, sizeof(xt));                                    \
                                                                   \
    for (i = 0; i < nels; i++) {                                   \
        xt.tfld = stp##_to_##ttp(xb.sfld, 1, &env->fp_status);     \
        if (unlikely(stp##_is_signaling_nan(xb.sfld,               \
                                            &env->fp_status))) {   \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
            xt.tfld = ttp##_snan_to_qnan(xt.tfld);                 \
        }                                                          \
        if (sfprf) {                                               \
            helper_compute_fprf_##ttp(env, xt.tfld);               \
        }                                                          \
    }                                                              \
                                                                   \
    putVSR(xT(opcode), &xt, env);                                  \
    float_check_status(env);                                       \
}

VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i  + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
2842 
2843 /*
2844  * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
2845  * added to this later.
2846  */
2847 void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode)
2848 {
2849     ppc_vsr_t xt, xb;
2850     float_status tstat;
2851 
2852     getVSR(rB(opcode) + 32, &xb, env);
2853     memset(&xt, 0, sizeof(xt));
2854 
2855     tstat = env->fp_status;
2856     if (unlikely(Rc(opcode) != 0)) {
2857         tstat.float_rounding_mode = float_round_to_odd;
2858     }
2859 
2860     xt.VsrD(0) = float128_to_float64(xb.f128, &tstat);
2861     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
2862     if (unlikely(float128_is_signaling_nan(xb.f128,
2863                                            &tstat))) {
2864         float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
2865         xt.VsrD(0) = float64_snan_to_qnan(xt.VsrD(0));
2866     }
2867     helper_compute_fprf_float64(env, xt.VsrD(0));
2868 
2869     putVSR(rD(opcode) + 32, &xt, env);
2870     float_check_status(env);
2871 }
2872 
2873 uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
2874 {
2875     float_status tstat = env->fp_status;
2876     set_float_exception_flags(0, &tstat);
2877 
2878     return (uint64_t)float64_to_float32(xb, &tstat) << 32;
2879 }
2880 
2881 uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
2882 {
2883     float_status tstat = env->fp_status;
2884     set_float_exception_flags(0, &tstat);
2885 
2886     return float32_to_float64(xb >> 32, &tstat);
2887 }
2888 
/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (float32 or float64)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 *
 * Round-to-zero conversion.  A NaN input raises VXCVI (plus VXSNAN when
 * signalling) and stores the saturation value rnan; for non-NaN inputs,
 * out-of-range values are detected via float_flag_invalid after the
 * conversion.
 * NOTE(review): the float_flag_invalid test reads env->fp_status, whose
 * flags accumulate across loop iterations -- confirm the flags are
 * cleared on helper entry so an earlier element cannot affect a later
 * one.
 */
#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
    int i;                                                                   \
                                                                             \
    getVSR(xB(opcode), &xb, env);                                            \
    getVSR(xT(opcode), &xt, env);                                            \
                                                                             \
    for (i = 0; i < nels; i++) {                                             \
        if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
            if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {          \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
            }                                                                \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
            xt.tfld = rnan;                                                  \
        } else {                                                             \
            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
                          &env->fp_status);                                  \
            if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
            }                                                                \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(xT(opcode), &xt, env);                                            \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                  0x80000000U)
VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
                  0x8000000000000000ULL)
VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
2944 
/* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
 *   op    - instruction mnemonic
 *   stp   - source type (float128)
 *   ttp   - target type (int32, uint32, int64 or uint64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   rnan  - resulting NaN
 *
 * Quad-precision scalar form: operands are in the FPR-mapped VSRs
 * (rB/rD + 32).  NaN inputs raise VXCVI (plus VXSNAN when signalling)
 * and store the saturation value rnan; otherwise out-of-range values
 * are caught via float_flag_invalid after the round-to-zero conversion.
 */
#define VSX_CVT_FP_TO_INT_VECTOR(op, stp, ttp, sfld, tfld, rnan)             \
void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
{                                                                            \
    ppc_vsr_t xt, xb;                                                        \
                                                                             \
    getVSR(rB(opcode) + 32, &xb, env);                                       \
    memset(&xt, 0, sizeof(xt));                                              \
                                                                             \
    if (unlikely(stp##_is_any_nan(xb.sfld))) {                               \
        if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) {              \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);           \
        }                                                                    \
        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);                \
        xt.tfld = rnan;                                                      \
    } else {                                                                 \
        xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                    \
                      &env->fp_status);                                      \
        if (env->fp_status.float_exception_flags & float_flag_invalid) {     \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
        }                                                                    \
    }                                                                        \
                                                                             \
    putVSR(rD(opcode) + 32, &xt, env);                                       \
    float_check_status(env);                                                 \
}

VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0),          \
                  0x8000000000000000ULL)

VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0),          \
                  0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
2986 
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   stp   - source type (int32, uint32, int64 or uint64)
 *   ttp   - target type (float32 or float64)
 *   sfld  - source vsr_t field
 *   tfld  - target vsr_t field
 *   sfprf - set FPRF
 *   r2sp  - round the intermediate double result to single precision
 *           (via helper_frsp) for the *sp scalar forms
 *
 * helper_compute_fprf_float64() is hard-coded below; this is safe
 * because every instantiation with sfprf = 1 produces a float64.
 */
#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
{                                                                       \
    ppc_vsr_t xt, xb;                                                   \
    int i;                                                              \
                                                                        \
    getVSR(xB(opcode), &xb, env);                                       \
    getVSR(xT(opcode), &xt, env);                                       \
                                                                        \
    for (i = 0; i < nels; i++) {                                        \
        xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
        if (r2sp) {                                                     \
            xt.tfld = helper_frsp(env, xt.tfld);                        \
        }                                                               \
        if (sfprf) {                                                    \
            helper_compute_fprf_float64(env, xt.tfld);                  \
        }                                                               \
    }                                                                   \
                                                                        \
    putVSR(xT(opcode), &xt, env);                                       \
    float_check_status(env);                                            \
}

VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
3032 
3033 /* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
3034  *   op    - instruction mnemonic
3035  *   stp   - source type (int32, uint32, int64 or uint64)
3036  *   ttp   - target type (float32 or float64)
3037  *   sfld  - source vsr_t field
3038  *   tfld  - target vsr_t field
3039  */
3040 #define VSX_CVT_INT_TO_FP_VECTOR(op, stp, ttp, sfld, tfld)              \
3041 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
3042 {                                                                       \
3043     ppc_vsr_t xt, xb;                                                   \
3044                                                                         \
3045     getVSR(rB(opcode) + 32, &xb, env);                                  \
3046     getVSR(rD(opcode) + 32, &xt, env);                                  \
3047                                                                         \
3048     xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);                 \
3049     helper_compute_fprf_##ttp(env, xt.tfld);                            \
3050                                                                         \
3051     putVSR(xT(opcode) + 32, &xt, env);                                  \
3052     float_check_status(env);                                            \
3053 }
3054 
3055 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3056 VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3057 
/* For "use current rounding mode", define a value that will not be one of
 * the existing rounding model enums.
 * NOTE(review): this relies on the sum of the four enum values never
 * colliding with a real float_round_* constant -- re-check if softfloat
 * gains new rounding modes.
 */
#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
  float_round_up + float_round_to_zero)

/* VSX_ROUND - VSX floating point round
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   tp    - type (float32 or float64)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   rmode - rounding mode
 *   sfprf - set FPRF
 *
 * A signalling NaN input raises VXSNAN and is quieted rather than
 * rounded.  helper_compute_fprf_float64() is hard-coded below; this is
 * safe because only the float64 scalar forms pass sfprf = 1.
 */
#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf)                     \
void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
{                                                                      \
    ppc_vsr_t xt, xb;                                                  \
    int i;                                                             \
    getVSR(xB(opcode), &xb, env);                                      \
    getVSR(xT(opcode), &xt, env);                                      \
                                                                       \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        set_float_rounding_mode(rmode, &env->fp_status);               \
    }                                                                  \
                                                                       \
    for (i = 0; i < nels; i++) {                                       \
        if (unlikely(tp##_is_signaling_nan(xb.fld,                     \
                                           &env->fp_status))) {        \
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
        } else {                                                       \
            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
        }                                                              \
        if (sfprf) {                                                   \
            helper_compute_fprf_float64(env, xt.fld);                  \
        }                                                              \
    }                                                                  \
                                                                       \
    /* If this is not a "use current rounding mode" instruction,       \
     * then inhibit setting of the XX bit and restore rounding         \
     * mode from FPSCR */                                              \
    if (rmode != FLOAT_ROUND_CURRENT) {                                \
        fpscr_set_rounding_mode(env);                                  \
        env->fp_status.float_exception_flags &= ~float_flag_inexact;   \
    }                                                                  \
                                                                       \
    putVSR(xT(opcode), &xt, env);                                      \
    float_check_status(env);                                           \
}

VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)

VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)

VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
3126 
3127 uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
3128 {
3129     helper_reset_fpstatus(env);
3130 
3131     uint64_t xt = helper_frsp(env, xb);
3132 
3133     helper_compute_fprf_float64(env, xt);
3134     float_check_status(env);
3135     return xt;
3136 }
3137 
/* VSX_XXPERM - VSX byte permute
 *   op      - instruction mnemonic
 *   indexed - 0 for xxperm (direct index), 1 for xxpermr (index is
 *             reversed as 31 - idx)
 *
 * Each result byte i is selected by the low 5 bits of byte i of the
 * permute control vector (read from xB): indices 0..15 pick a byte of
 * xA, indices 16..31 pick a byte of the original xT.
 */
#define VSX_XXPERM(op, indexed)                                       \
void helper_##op(CPUPPCState *env, uint32_t opcode)                   \
{                                                                     \
    ppc_vsr_t xt, xa, pcv, xto;                                       \
    int i, idx;                                                       \
                                                                      \
    getVSR(xA(opcode), &xa, env);                                     \
    getVSR(xT(opcode), &xt, env);                                     \
    getVSR(xB(opcode), &pcv, env);                                    \
                                                                      \
    for (i = 0; i < 16; i++) {                                        \
        idx = pcv.VsrB(i) & 0x1F;                                     \
        if (indexed) {                                                \
            idx = 31 - idx;                                           \
        }                                                             \
        xto.VsrB(i) = (idx <= 15) ? xa.VsrB(idx) : xt.VsrB(idx - 16); \
    }                                                                 \
    putVSR(xT(opcode), &xto, env);                                    \
}

VSX_XXPERM(xxperm, 0)
VSX_XXPERM(xxpermr, 1)
3160 
3161 void helper_xvxsigsp(CPUPPCState *env, uint32_t opcode)
3162 {
3163     ppc_vsr_t xt, xb;
3164     uint32_t exp, i, fraction;
3165 
3166     getVSR(xB(opcode), &xb, env);
3167     memset(&xt, 0, sizeof(xt));
3168 
3169     for (i = 0; i < 4; i++) {
3170         exp = (xb.VsrW(i) >> 23) & 0xFF;
3171         fraction = xb.VsrW(i) & 0x7FFFFF;
3172         if (exp != 0 && exp != 255) {
3173             xt.VsrW(i) = fraction | 0x00800000;
3174         } else {
3175             xt.VsrW(i) = fraction;
3176         }
3177     }
3178     putVSR(xT(opcode), &xt, env);
3179 }
3180 
/* VSX_TEST_DC - VSX floating point test data class
 *   op    - instruction mnemonic
 *   nels  - number of elements (1, 2 or 4)
 *   xbn   - VSR register number
 *   tp    - type (float32, float64 or float128)
 *   fld   - vsr_t field (VsrD(*) or VsrW(*))
 *   tfld   - target vsr_t field (VsrD(*) or VsrW(*))
 *   fld_max - target field max
 *   scrf - set result in CR and FPCC
 *
 * DCMX data-class mask bits, as selected by the extract32() calls
 * below: bit 6 = NaN, bit 5/4 = +Inf/-Inf, bit 3/2 = +0/-0,
 * bit 1/0 = +Denormal/-Denormal.  With scrf the sign/match result goes
 * to CR[BF] and FPSCR[FPRF]; otherwise each target element becomes
 * all-ones on a match and zero otherwise.
 */
#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf)  \
void helper_##op(CPUPPCState *env, uint32_t opcode)         \
{                                                           \
    ppc_vsr_t xt, xb;                                       \
    uint32_t i, sign, dcmx;                                 \
    uint32_t cc, match = 0;                                 \
                                                            \
    getVSR(xbn, &xb, env);                                  \
    if (!scrf) {                                            \
        memset(&xt, 0, sizeof(xt));                         \
        dcmx = DCMX_XV(opcode);                             \
    } else {                                                \
        dcmx = DCMX(opcode);                                \
    }                                                       \
                                                            \
    for (i = 0; i < nels; i++) {                            \
        sign = tp##_is_neg(xb.fld);                         \
        if (tp##_is_any_nan(xb.fld)) {                      \
            match = extract32(dcmx, 6, 1);                  \
        } else if (tp##_is_infinity(xb.fld)) {              \
            match = extract32(dcmx, 4 + !sign, 1);          \
        } else if (tp##_is_zero(xb.fld)) {                  \
            match = extract32(dcmx, 2 + !sign, 1);          \
        } else if (tp##_is_zero_or_denormal(xb.fld)) {      \
            match = extract32(dcmx, 0 + !sign, 1);          \
        }                                                   \
                                                            \
        if (scrf) {                                         \
            cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT;  \
            env->fpscr &= ~(0x0F << FPSCR_FPRF);            \
            env->fpscr |= cc << FPSCR_FPRF;                 \
            env->crf[BF(opcode)] = cc;                      \
        } else {                                            \
            xt.tfld = match ? fld_max : 0;                  \
        }                                                   \
        match = 0;                                          \
    }                                                       \
    if (!scrf) {                                            \
        putVSR(xT(opcode), &xt, env);                       \
    }                                                       \
}

VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
3237 
/* xststdcsp: VSX scalar test data class for a single-precision value
 * held in double-precision format.  Sets CR[BF] and FPSCR[FPRF]:
 * LT = sign, EQ = data-class match, SO = value not exactly
 * representable in single precision.
 */
void helper_xststdcsp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    uint32_t dcmx, sign, exp;
    uint32_t cc, match = 0, not_sp = 0;

    getVSR(xB(opcode), &xb, env);
    dcmx = DCMX(opcode);
    exp = (xb.VsrD(0) >> 52) & 0x7FF;   /* biased DP exponent */

    sign = float64_is_neg(xb.VsrD(0));
    if (float64_is_any_nan(xb.VsrD(0))) {
        match = extract32(dcmx, 6, 1);
    } else if (float64_is_infinity(xb.VsrD(0))) {
        match = extract32(dcmx, 4 + !sign, 1);
    } else if (float64_is_zero(xb.VsrD(0))) {
        match = extract32(dcmx, 2 + !sign, 1);
    } else if (float64_is_zero_or_denormal(xb.VsrD(0)) ||
               (exp > 0 && exp < 0x381)) {
        /* Nonzero DP exponents below 0x381 are values that would be
         * denormal in single precision, so they count as denormal for
         * the SP data-class test.
         */
        match = extract32(dcmx, 0 + !sign, 1);
    }

    /* SO is set when the value does not survive a DP->SP->DP round
     * trip, i.e. it is not exactly representable in single precision.
     */
    not_sp = !float64_eq(xb.VsrD(0),
                         float32_to_float64(
                             float64_to_float32(xb.VsrD(0), &env->fp_status),
                             &env->fp_status), &env->fp_status);

    cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= cc << FPSCR_FPRF;
    env->crf[BF(opcode)] = cc;
}
3270 
/* xsrqpi / xsrqpix: round quad-precision to integral value.  The R
 * (Rrm) and RMC fields select the rounding mode; ex (the Rc/EX bit)
 * distinguishes the two forms -- with ex == 0 the inexact flag raised
 * by the rounding is suppressed.
 */
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t ex = Rc(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    /* Decode the rounding mode from the R and RMC fields. */
    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;               /* use the FPSCR rounding mode */
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round under a scratch status so the selected mode does not
     * disturb env->fp_status, then merge the exception flags back.
     */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    xt.f128 = float128_round_to_int(xb.f128, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
        /* xsrqpi form (ex == 0): inhibit the inexact exception. */
        env->fp_status.float_exception_flags &= ~float_flag_inexact;
    }

    helper_compute_fprf_float128(env, xt.f128);
    float_check_status(env);
    putVSR(rD(opcode) + 32, &xt, env);
}
3329 
/* xsrqpxp: round quad-precision to double-extended precision,
 * implemented as a float128 -> floatx80 -> float128 round trip under
 * the rounding mode selected by the R (Rrm) and RMC fields.
 */
void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode)
{
    ppc_vsr_t xb;
    ppc_vsr_t xt;
    uint8_t r = Rrm(opcode);
    uint8_t rmc = RMC(opcode);
    uint8_t rmode = 0;
    floatx80 round_res;
    float_status tstat;

    getVSR(rB(opcode) + 32, &xb, env);
    memset(&xt, 0, sizeof(xt));
    helper_reset_fpstatus(env);

    /* Decode the rounding mode from the R and RMC fields. */
    if (r == 0 && rmc == 0) {
        rmode = float_round_ties_away;
    } else if (r == 0 && rmc == 0x3) {
        rmode = fpscr_rn;               /* use the FPSCR rounding mode */
    } else if (r == 1) {
        switch (rmc) {
        case 0:
            rmode = float_round_nearest_even;
            break;
        case 1:
            rmode = float_round_to_zero;
            break;
        case 2:
            rmode = float_round_up;
            break;
        case 3:
            rmode = float_round_down;
            break;
        default:
            abort();
        }
    }

    /* Round under a scratch status so the selected mode does not
     * disturb env->fp_status, then merge the exception flags back.
     */
    tstat = env->fp_status;
    set_float_exception_flags(0, &tstat);
    set_float_rounding_mode(rmode, &tstat);
    round_res = float128_to_floatx80(xb.f128, &tstat);
    xt.f128 = floatx80_to_float128(round_res, &tstat);
    env->fp_status.float_exception_flags |= tstat.float_exception_flags;

    if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
        if (float128_is_signaling_nan(xb.f128, &tstat)) {
            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);
            xt.f128 = float128_snan_to_qnan(xt.f128);
        }
    }

    helper_compute_fprf_float128(env, xt.f128);
    putVSR(rD(opcode) + 32, &xt, env);
    float_check_status(env);
}
3385 
3386 void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode)
3387 {
3388     ppc_vsr_t xb;
3389     ppc_vsr_t xt;
3390     float_status tstat;
3391 
3392     getVSR(rB(opcode) + 32, &xb, env);
3393     memset(&xt, 0, sizeof(xt));
3394     helper_reset_fpstatus(env);
3395 
3396     tstat = env->fp_status;
3397     if (unlikely(Rc(opcode) != 0)) {
3398         tstat.float_rounding_mode = float_round_to_odd;
3399     }
3400 
3401     set_float_exception_flags(0, &tstat);
3402     xt.f128 = float128_sqrt(xb.f128, &tstat);
3403     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3404 
3405     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3406         if (float128_is_signaling_nan(xb.f128, &tstat)) {
3407             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3408             xt.f128 = float128_snan_to_qnan(xb.f128);
3409         } else if  (float128_is_quiet_nan(xb.f128, &tstat)) {
3410             xt.f128 = xb.f128;
3411         } else if (float128_is_neg(xb.f128) && !float128_is_zero(xb.f128)) {
3412             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
3413             xt.f128 = float128_default_nan(&env->fp_status);
3414         }
3415     }
3416 
3417     helper_compute_fprf_float128(env, xt.f128);
3418     putVSR(rD(opcode) + 32, &xt, env);
3419     float_check_status(env);
3420 }
3421 
3422 void helper_xssubqp(CPUPPCState *env, uint32_t opcode)
3423 {
3424     ppc_vsr_t xt, xa, xb;
3425     float_status tstat;
3426 
3427     getVSR(rA(opcode) + 32, &xa, env);
3428     getVSR(rB(opcode) + 32, &xb, env);
3429     getVSR(rD(opcode) + 32, &xt, env);
3430     helper_reset_fpstatus(env);
3431 
3432     tstat = env->fp_status;
3433     if (unlikely(Rc(opcode) != 0)) {
3434         tstat.float_rounding_mode = float_round_to_odd;
3435     }
3436 
3437     set_float_exception_flags(0, &tstat);
3438     xt.f128 = float128_sub(xa.f128, xb.f128, &tstat);
3439     env->fp_status.float_exception_flags |= tstat.float_exception_flags;
3440 
3441     if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
3442         if (float128_is_infinity(xa.f128) && float128_is_infinity(xb.f128)) {
3443             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
3444         } else if (float128_is_signaling_nan(xa.f128, &tstat) ||
3445                    float128_is_signaling_nan(xb.f128, &tstat)) {
3446             float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
3447         }
3448     }
3449 
3450     helper_compute_fprf_float128(env, xt.f128);
3451     putVSR(rD(opcode) + 32, &xt, env);
3452     float_check_status(env);
3453 }
3454