/* arch/powerpc/math-emu/udivmodti4.c */
/* This has so very few changes over libgcc2's __udivmoddi4 it isn't funny.  */

#include <math-emu/soft-fp.h>

#undef count_leading_zeros
#define count_leading_zeros  __FP_CLZ

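/* Divide the two-word (2 * _FP_W_TYPE_SIZE bit) numerator n1:n0 by the
   two-word denominator d1:d0.  The quotient is returned in q[1]:q[0] and
   the remainder in r[1]:r[0], with element 0 holding the low word.
   count_leading_zeros is mapped above onto soft-fp's __FP_CLZ;
   udiv_qrnnd, umul_ppmm and sub_ddmmss are the usual longlong.h-style
   double-word primitives.  */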
void
_fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2],
	       _FP_W_TYPE n1, _FP_W_TYPE n0,
	       _FP_W_TYPE d1, _FP_W_TYPE d0)
{
  _FP_W_TYPE q0, q1, r0, r1;
  _FP_I_TYPE b, bm;

  if (d1 == 0)
    {
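      /* Single-word divisor.  UDIV_NEEDS_NORMALIZATION is set on machines
	 whose udiv_qrnnd requires the divisor to have its most significant
	 bit set, selecting the shifting variant below.  */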
#if !UDIV_NEEDS_NORMALIZATION
      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

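	  /* Schoolbook two-step division: udiv_qrnnd (q, r, nh, nl, d)
	     divides the double word nh:nl by d.  First divide 0:n1 to get
	     the high quotient word, then divide the remainder (left in n1)
	     joined with n0 to get the low quotient word.  */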
	  udiv_qrnnd (q1, n1, 0, n1, d0);
	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0.  */
	}

      r0 = n0;
      r1 = 0;

#else /* UDIV_NEEDS_NORMALIZATION */

      if (d0 > n1)
	{
	  /* 0q = nn / 0D */

	  count_leading_zeros (bm, d0);

	  if (bm != 0)
	    {
	      /* Normalize, i.e. make the most significant bit of the
		 denominator set.  */

	      d0 = d0 << bm;
	      n1 = (n1 << bm) | (n0 >> (_FP_W_TYPE_SIZE - bm));
	      n0 = n0 << bm;
	    }

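	  /* When bm != 0 the bits shifted out of n1 are zero (because
	     d0 > n1), so no third word is needed.  Scaling numerator and
	     denominator by the same power of two leaves the quotient
	     unchanged; the remainder comes out scaled by 2^bm, which the
	     ">> bm" below undoes.  */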
	  udiv_qrnnd (q0, n0, n1, n0, d0);
	  q1 = 0;

	  /* Remainder in n0 >> bm.  */
	}
      else
	{
	  /* qq = NN / 0d */

	  if (d0 == 0)
	    d0 = 1 / d0;	/* Divide intentionally by zero.  */

	  count_leading_zeros (bm, d0);

	  if (bm == 0)
	    {
	      /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 leading quotient digit q1 = 1).

		 This special case is necessary, not an optimization.
		 (Shift counts equal to _FP_W_TYPE_SIZE are undefined.)  */

	      n1 -= d0;
	      q1 = 1;
	    }
	  else
	    {
	      _FP_W_TYPE n2;

	      /* Normalize.  */

	      b = _FP_W_TYPE_SIZE - bm;

	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

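	      /* n2:n1:n0 is now the original numerator scaled by 2^bm.
		 n2 holds only the bits shifted out of n1 and is smaller
		 than the normalized d0, so it is a valid high word for
		 udiv_qrnnd.  This step yields the high quotient word q1
		 and leaves its remainder in n1.  */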
	      udiv_qrnnd (q1, n1, n2, n1, d0);
	    }

	  /* In both cases n1 < d0 now, as the final udiv_qrnnd requires
	     of its high numerator word.  */

	  udiv_qrnnd (q0, n0, n1, n0, d0);

	  /* Remainder in n0 >> bm.  */
	}

      r0 = n0 >> bm;
      r1 = 0;
#endif /* UDIV_NEEDS_NORMALIZATION */
    }
  else
    {
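      /* Two-word divisor.  The numerator also has only two words, so the
	 quotient fits in a single word and q1 is always 0 here.  */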
      if (d1 > n1)
	{
	  /* 00 = nn / DD */

	  q0 = 0;
	  q1 = 0;

	  /* Remainder in n1n0.  */
	  r0 = n0;
	  r1 = n1;
	}
      else
	{
	  /* 0q = NN / dd */

	  count_leading_zeros (bm, d1);
	  if (bm == 0)
	    {
	      /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
		 conclude (the most significant bit of n1 is set) /\ (the
		 quotient digit q0 = 0 or 1).

		 This special case is necessary, not an optimization.  */

	      /* The condition on the next line takes advantage of the fact
		 that n1 >= d1 (true due to program flow).  */
	      if (n1 > d1 || n0 >= d0)
		{
		  q0 = 1;
		  sub_ddmmss (n1, n0, n1, n0, d1, d0);
		}
	      else
		q0 = 0;

	      q1 = 0;

	      r0 = n0;
	      r1 = n1;
	    }
	  else
	    {
	      _FP_W_TYPE m1, m0, n2;

	      /* Normalize.  */

	      b = _FP_W_TYPE_SIZE - bm;

	      d1 = (d1 << bm) | (d0 >> b);
	      d0 = d0 << bm;
	      n2 = n1 >> b;
	      n1 = (n1 << bm) | (n0 >> b);
	      n0 = n0 << bm;

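	      /* Estimate the quotient from the top two numerator words and
		 the high divisor word alone, then form q0 * d0 so the
		 estimate can be checked against the full divisor below.  */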
	      udiv_qrnnd (q0, n1, n2, n1, d1);
	      umul_ppmm (m1, m0, q0, d0);

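	      /* If q0 * d0 exceeds the partial remainder n1:n0, the
		 estimate was one too large: decrement q0 and subtract the
		 full divisor from the product so the remainder computed
		 below stays correct.  */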
	      if (m1 > n1 || (m1 == n1 && m0 > n0))
		{
		  q0--;
		  sub_ddmmss (m1, m0, m1, m0, d1, d0);
		}

	      q1 = 0;

	      /* Remainder in (n1n0 - m1m0) >> bm.  */
	      sub_ddmmss (n1, n0, n1, n0, m1, m0);
	      r0 = (n1 << b) | (n0 >> bm);
	      r1 = n1 >> bm;
	    }
	}
    }

  q[0] = q0; q[1] = q1;
  r[0] = r0; r[1] = r1;
}