/* This has so very few changes over libgcc2's __udivmoddi4 it isn't funny.  */

#include "soft-fp.h"

#undef count_leading_zeros
#define count_leading_zeros  __FP_CLZ
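
/* Divide the two-word value n1:n0 by d1:d0, leaving the quotient in q[]
   and the remainder in r[], low word at index 0.  The helper macros are
   assumed to have their usual longlong.h semantics: udiv_qrnnd (q, r, nh,
   nl, d) divides the two-word nh:nl by the one-word d, assuming nh < d
   (and, if UDIV_NEEDS_NORMALIZATION is nonzero, that the most significant
   bit of d is set); umul_ppmm (ph, pl, a, b) forms the two-word product
   a * b; sub_ddmmss (sh, sl, ah, al, bh, bl) computes the two-word
   difference ah:al - bh:bl.  */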

void
_fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2],
               _FP_W_TYPE n1, _FP_W_TYPE n0,
               _FP_W_TYPE d1, _FP_W_TYPE d0)
{
  _FP_W_TYPE q0, q1, r0, r1;
  _FP_I_TYPE b, bm;

  if (d1 == 0)
    {
#if !UDIV_NEEDS_NORMALIZATION
      if (d0 > n1)
        {
          /* 0q = nn / 0D */

          udiv_qrnnd (q0, n0, n1, n0, d0);
          q1 = 0;

          /* Remainder in n0.  */
        }
      else
        {
          /* qq = NN / 0d */

          if (d0 == 0)
            d0 = 1 / d0;        /* Divide intentionally by zero.  */

          udiv_qrnnd (q1, n1, 0, n1, d0);
          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0.  */
        }

      r0 = n0;
      r1 = 0;

#else /* UDIV_NEEDS_NORMALIZATION */

      if (d0 > n1)
        {
          /* 0q = nn / 0D */

          count_leading_zeros (bm, d0);

          if (bm != 0)
            {
              /* Normalize, i.e. make the most significant bit of the
                 denominator set.  */

              d0 = d0 << bm;
              n1 = (n1 << bm) | (n0 >> (_FP_W_TYPE_SIZE - bm));
              n0 = n0 << bm;
            }

          udiv_qrnnd (q0, n0, n1, n0, d0);
          q1 = 0;

          /* Remainder in n0 >> bm.  */
        }
      else
        {
          /* qq = NN / 0d */

          if (d0 == 0)
            d0 = 1 / d0;        /* Divide intentionally by zero.  */

          count_leading_zeros (bm, d0);

          if (bm == 0)
            {
              /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 leading quotient digit q1 = 1).

                 This special case is necessary, not an optimization.
                 (Shift counts of _FP_W_TYPE_SIZE are undefined.)  */

              n1 -= d0;
              q1 = 1;
            }
          else
            {
              _FP_W_TYPE n2;

              /* Normalize.  */

              b = _FP_W_TYPE_SIZE - bm;

              d0 = d0 << bm;
              n2 = n1 >> b;
              n1 = (n1 << bm) | (n0 >> b);
              n0 = n0 << bm;

              udiv_qrnnd (q1, n1, n2, n1, d0);
            }

          /* n1 != d0...  */

          udiv_qrnnd (q0, n0, n1, n0, d0);

          /* Remainder in n0 >> bm.  */
        }

      r0 = n0 >> bm;
      r1 = 0;
#endif /* UDIV_NEEDS_NORMALIZATION */
    }
  else
    {
      if (d1 > n1)
        {
          /* 00 = nn / DD */

          q0 = 0;
          q1 = 0;

          /* Remainder in n1n0.  */
          r0 = n0;
          r1 = n1;
        }
      else
        {
          /* 0q = NN / dd */

          count_leading_zeros (bm, d1);
          if (bm == 0)
            {
              /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
                 conclude (the most significant bit of n1 is set) /\ (the
                 quotient digit q0 = 0 or 1).

                 This special case is necessary, not an optimization.  */

              /* The condition on the next line takes advantage of the fact
                 that n1 >= d1 (true due to program flow).  */
              if (n1 > d1 || n0 >= d0)
                {
                  q0 = 1;
                  sub_ddmmss (n1, n0, n1, n0, d1, d0);
                }
              else
                q0 = 0;

              q1 = 0;

              r0 = n0;
              r1 = n1;
            }
          else
            {
              _FP_W_TYPE m1, m0, n2;

              /* Normalize.  */

              b = _FP_W_TYPE_SIZE - bm;

              d1 = (d1 << bm) | (d0 >> b);
              d0 = d0 << bm;
              n2 = n1 >> b;
              n1 = (n1 << bm) | (n0 >> b);
              n0 = n0 << bm;
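
              /* Estimate the quotient digit q0 from the two high words and
                 the high divisor word alone, then account for the low
                 divisor word: with d1 normalized the estimate is never too
                 small and at most one too large, so the single downward
                 correction below suffices.  */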

              udiv_qrnnd (q0, n1, n2, n1, d1);
              umul_ppmm (m1, m0, q0, d0);

              if (m1 > n1 || (m1 == n1 && m0 > n0))
                {
                  q0--;
                  sub_ddmmss (m1, m0, m1, m0, d1, d0);
                }

              q1 = 0;

              /* Remainder in (n1n0 - m1m0) >> bm.  */
              sub_ddmmss (n1, n0, n1, n0, m1, m0);
              r0 = (n1 << b) | (n0 >> bm);
              r1 = n1 >> bm;
            }
        }
    }

  q[0] = q0; q[1] = q1;
  r[0] = r0; r[1] = r1;
}
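
/* Example of the calling convention (hypothetical caller, assuming a
   64-bit _FP_W_TYPE): to divide the 128-bit value 2^64 + 5 by 3,

       _FP_W_TYPE q[2], r[2];
       _fp_udivmodti4 (q, r, 1, 5, 0, 3);

   takes the d1 == 0, d0 > n1 path and leaves the quotient in q[1]:q[0]
   and the remainder (here 0, since 2^64 + 5 is divisible by 3) in
   r[1]:r[0], low word at index 0.  */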