// SPDX-License-Identifier: GPL-2.0-or-later
/*
   fp_arith.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines.

   Somewhat based on the Alpha floating point emulator, by David
   Mosberger-Tang.
 */

fp_fabs(struct fp_ext *dest, struct fp_ext *src)

	fp_monadic_check(dest, src);

	dest->sign = 0;

fp_fneg(struct fp_ext *dest, struct fp_ext *src)

	fp_monadic_check(dest, src);

	dest->sign = !dest->sign;

fp_fadd(struct fp_ext *dest, struct fp_ext *src)

	fp_dyadic_check(dest, src);

		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))

	if (IS_INF(src)) {
		fp_copy_ext(dest, src);

		if (IS_ZERO(src)) {
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;

			fp_copy_ext(dest, src);

	dest->lowmant = src->lowmant = 0;

	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

	if (dest->sign == src->sign) {
		if (fp_addmant(dest, src))

	} else {
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

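	/*
	 * Summary of the path above: after the special cases, the operand
	 * with the smaller exponent is denormalized (shifted right) until
	 * the exponents match; equal signs then add the mantissas with
	 * fp_addmant(), while opposite signs subtract the smaller mantissa
	 * from the larger with fp_submant(), flipping the result's sign
	 * when dest held the smaller magnitude.
	 */
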
/* Remember that the arguments are in assembler-syntax order! */

fp_fsub(struct fp_ext *dest, struct fp_ext *src)

	src->sign = !src->sign;
	return fp_fadd(dest, src);

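	/*
	 * In 680x0 assembler syntax the source comes first, so
	 * "fsub.x fp0,fp1" leaves fp1 - fp0 in fp1.  With dest playing the
	 * fp1 role, dest - src is obtained simply by flipping src's sign
	 * and reusing fp_fadd().
	 */
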
fp_fcmp(struct fp_ext *dest, struct fp_ext *src)

	FPDATA->temp[1] = *dest;
	src->sign = !src->sign;
	return fp_fadd(&FPDATA->temp[1], src);

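	/*
	 * A compare must leave dest intact, so the subtraction is carried
	 * out on a copy in the scratch slot FPDATA->temp[1]; only the sign
	 * and zeroness of dest - src matter for FCMP.
	 */
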
fp_ftst(struct fp_ext *dest, struct fp_ext *src)

	return src;

fp_fmul(struct fp_ext *dest, struct fp_ext *src)

	fp_dyadic_check(dest, src);

	dest->sign = src->sign ^ dest->sign;

		if (IS_ZERO(src))

	if (IS_INF(src)) {

			fp_copy_ext(dest, src);

	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

	exp = dest->exp + src->exp - 0x3ffe;

	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

		exp--;

	dest->exp = exp;

		fp_denormalize(dest, -exp);

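	/*
	 * Exponent bookkeeping: the extended-precision bias is 0x3fff, and
	 * adding two biased exponents counts that bias twice, so
	 *
	 *	(e1 - 0x3fff) + (e2 - 0x3fff) + 0x3fff + 1 = e1 + e2 - 0x3ffe
	 *
	 * is the biased exponent of the product when the two significands
	 * in [1,2) multiply out to something in [2,4); the exp-- above takes
	 * the provisional +1 back on the branch where the significand
	 * product stayed below 2.
	 */
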
/* Note that the order of the operands is counter-intuitive: instead
   of src / dest, the result is actually dest / src. */

fp_fdiv(struct fp_ext *dest, struct fp_ext *src)

	fp_dyadic_check(dest, src);

	dest->sign = src->sign ^ dest->sign;

		if (IS_INF(src))

	if (IS_INF(src)) {

		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		if (IS_ZERO(src))

	if (IS_ZERO(src)) {

		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

	exp = dest->exp - src->exp + 0x3fff;

	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

		exp--;

	dest->exp = exp;

		fp_denormalize(dest, -exp);

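	/*
	 * Here the biases cancel instead of doubling up:
	 *
	 *	(e1 - 0x3fff) - (e2 - 0x3fff) + 0x3fff = e1 - e2 + 0x3fff
	 *
	 * which is exact when the quotient of the two significands lies in
	 * [1,2); a quotient in (0.5,1) needs one more normalizing shift,
	 * which the exp-- above accounts for.
	 */
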
fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)

	fp_dyadic_check(dest, src);

	dest->sign = src->sign ^ dest->sign;

		if (IS_ZERO(src))

	if (IS_INF(src)) {

			fp_copy_ext(dest, src);

	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

	exp = dest->exp + src->exp - 0x3ffe;

	/* do a 32-bit multiply */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	dest->exp = exp;

		fp_denormalize(dest, -exp);

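	/*
	 * The "single" variants only promise single-precision accuracy:
	 * masking m32[0] with 0xffffff00 keeps just the top 24 mantissa
	 * bits of each operand, so one 32x32->64 multiply (fp_mul64) is
	 * enough instead of the full 64x64 product used by fp_fmul().
	 */
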
fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)

	fp_dyadic_check(dest, src);

	dest->sign = src->sign ^ dest->sign;

		if (IS_INF(src))

	if (IS_INF(src)) {

		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		if (IS_ZERO(src))

	if (IS_ZERO(src)) {

		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

	exp = dest->exp - src->exp + 0x3fff;

	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

	/* do the 32-bit divide */
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;	/* only for rounding */
		exp--;
	}

	dest->exp = exp;

		fp_denormalize(dest, -exp);

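	/*
	 * fp_div64() turns a 64-bit dividend and a 32-bit divisor into a
	 * 32-bit quotient, which only fits when the dividend's high word is
	 * smaller than the divisor.  If dest's mantissa is not smaller,
	 * src's mantissa is subtracted out once and the quotient's implicit
	 * leading 1 is put back via 0x80000000 | (quot >> 1); in the other
	 * branch the quotient is below 1 and the exp-- rebalances the
	 * exponent.
	 */
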
static void fp_roundint(struct fp_ext *dest, int mode)

	oldmant = dest->mant;
	switch (dest->exp) {

		dest->mant.m64 = 0;

		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)

		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])

	/* ... 0xffff8000, and the same holds for 128-bit / 64-bit (i.e. the
	   ... */

		switch (dest->exp) {

			mask = 1 << (0x401d - dest->exp);

			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&

			mask = 1 << (0x403d - dest->exp);

			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))

		if (dest->sign ^ (mode - FPCR_ROUND_RM))

	switch (dest->exp) {

		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;

		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;

		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;

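	/*
	 * Rounding-mode dispatch: the 68881 FPCR encodes RN=0, RZ=1, RM=2,
	 * RP=3, so (mode - FPCR_ROUND_RM) is 0 for round-to-minus-infinity
	 * and 1 for round-to-plus-infinity.  XORed with the sign bit it is
	 * nonzero exactly for the (positive, RP) and (negative, RM) cases,
	 * the two directed roundings that have to push the truncated
	 * magnitude up by one unit; the final switch does that increment,
	 * restarting the mantissa at 0x80000000 and bumping the exponent
	 * when the add carries out of the top word.
	 */
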
modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)

	fp_dyadic_check(dest, src);

	if (IS_INF(dest) || IS_ZERO(src)) {

	if (IS_ZERO(dest) || IS_INF(src))

	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */

	fp_fmul(&tmp, src);

	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));

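	/*
	 * modrem_kernel() factors out FMOD and FREM: divide, round the
	 * quotient to an integer in the caller's mode, multiply back and,
	 * per the formulas quoted below, subtract from dest.  A quotient
	 * byte (seven low bits plus a sign bit) is then deposited through
	 * fp_set_quotient(), mirroring the quotient byte the 68881 leaves
	 * in the FPSR.
	 */
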
/* fmod(src,dest) = (dest - (src * floor(dest / src))) */

fp_fmod(struct fp_ext *dest, struct fp_ext *src)

	return modrem_kernel(dest, src, FPCR_ROUND_RZ);

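	/*
	 * E.g. dest = 7.5, src = 2.0: the quotient 7.5/2.0 = 3.75 is chopped
	 * to 3 under FPCR_ROUND_RZ, so fmod returns 7.5 - 2.0*3 = 1.5.
	 */
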
/* frem(src,dest) = (dest - (src * round(dest / src))) */

fp_frem(struct fp_ext *dest, struct fp_ext *src)

	return modrem_kernel(dest, src, FPCR_ROUND_RN);

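/*
 * Illustrative userspace analogue, not part of the emulator: the C library
 * offers the same two quotient roundings, fmod() chopping the quotient
 * toward zero and remainder() rounding it to nearest, so the FMOD/FREM
 * difference can be seen with plain doubles.  For 7.5 and 2.0 the quotient
 * 3.75 becomes 3 (fmod) or 4 (remainder).
 */
#if 0
#include <math.h>
#include <stdio.h>

int main(void)
{
	printf("fmod(7.5, 2.0)      = %g\n", fmod(7.5, 2.0));		/* 1.5 */
	printf("remainder(7.5, 2.0) = %g\n", remainder(7.5, 2.0));	/* -0.5 */
	return 0;
}
#endif
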
fp_fint(struct fp_ext *dest, struct fp_ext *src)

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPDATA->rnd);

fp_fintrz(struct fp_ext *dest, struct fp_ext *src)

	fp_copy_ext(dest, src);

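/*
 * FINT honours the user's current rounding mode (FPDATA->rnd above), while
 * FINTRZ always chops toward zero; the elided tail of fp_fintrz() presumably
 * hands FPCR_ROUND_RZ to fp_roundint() regardless of the FPCR setting.
 */
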
fp_fscale(struct fp_ext *dest, struct fp_ext *src)

	fp_dyadic_check(dest, src);

	if (IS_INF(src)) {

	if (IS_ZERO(src) || IS_ZERO(dest))

	if (src->exp >= 0x400c) {

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	scale += dest->exp;

		fp_denormalize(dest, -scale);

		dest->exp = scale;

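/*
 * Illustrative userspace analogue, not part of the emulator: for finite,
 * in-range values FSCALE multiplies dest by 2 to the integer part of src,
 * which is what ldexp() does; the (int) cast mirrors the forced
 * FPCR_ROUND_RZ conversion of src above.
 */
#if 0
#include <math.h>

static double toy_fscale(double dest, double src)
{
	return ldexp(dest, (int)src);	/* dest * 2^trunc(src) */
}
#endif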