// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/powerpc/lib/div64.S and arch/sh/lib/div64.S,
 * or by defining a preprocessor macro in the architecture's asm/div64.h.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/log2.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the remainder: handle the quotient bits above 2^32 first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/* Scale the divisor up towards the remainder, tracking the matching
	 * quotient bit in d */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* Classic shift-and-subtract long division on the remaining bits */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif
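
/*
 * Illustrative sketch (not part of the upstream file): callers normally
 * reach __div64_32() through the do_div() macro from asm/div64.h rather
 * than calling it directly; do_div() handles the (n >> 32 == 0) fast case
 * inline and falls back to __div64_32() otherwise.  The helper name
 * example_ns_to_ms() and the open-coded constant are illustrative only.
 */
static inline u64 example_ns_to_ms(u64 ns, u32 *rem_ns)
{
	/* do_div() divides 'ns' in place by a 32-bit base and returns the
	 * 32-bit remainder. */
	*rem_ns = do_div(ns, 1000000);	/* nanoseconds per millisecond */
	return ns;			/* quotient: whole milliseconds */
}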

#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
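
/*
 * Illustrative sketch (not part of the upstream file): div_s64_rem()
 * truncates toward zero and the remainder takes the sign of the dividend,
 * matching C's native '/' and '%'.  For example, -7 divided by 2 yields a
 * quotient of -3 and a remainder of -1.  The helper name below is
 * hypothetical.
 */
static inline void example_div_s64_rem_signs(void)
{
	s32 rem;
	s64 quot = div_s64_rem(-7, 2, &rem);

	/* At this point quot == -3 and rem == -1. */
	(void)quot;
	(void)rem;
}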

/*
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:	64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64,
 * but the operation, which includes the extra math for calculating the
 * remainder, is kept distinct to avoid slowing down div64_u64 on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		/* The estimate from the shifted operands is either exact or
		 * one too large, so back it off and correct upwards below. */
		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
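
/*
 * Illustrative sketch (not part of the upstream file): div64_u64_rem() is
 * the variant to use when both the divisor and the remainder need the full
 * 64 bits, e.g. splitting a byte count into whole blocks plus a leftover
 * when the block size may not fit in 32 bits.  The helper name is
 * hypothetical.
 */
static inline u64 example_bytes_to_blocks(u64 bytes, u64 block_size, u64 *leftover)
{
	if (!block_size) {
		*leftover = bytes;
		return 0;	/* avoid dividing by zero */
	}
	return div64_u64_rem(bytes, block_size, leftover);
}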

/*
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * in the book 'Hacker's Delight'.  The original source and full proof
 * can be found at the URL below and are available for use without
 * restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
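
/*
 * Illustrative sketch (not part of the upstream file): a typical use of
 * div64_u64() is dividing one large counter by another, such as an average
 * of bytes per interval.  Names are hypothetical.
 */
static inline u64 example_mean_bytes_per_interval(u64 total_bytes, u64 intervals)
{
	if (!intervals)
		return 0;	/* avoid dividing by zero */
	return div64_u64(total_bytes, intervals);
}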

#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
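
/*
 * Illustrative sketch (not part of the upstream file): the sign fix-up in
 * div64_s64() relies on the identity (x ^ t) - t, which leaves x unchanged
 * when t == 0 and negates it when t == -1 (all bits set), so
 * t = (dividend ^ divisor) >> 63 selects negation exactly when the operand
 * signs differ.  The helper below restates that trick in isolation.
 */
static inline s64 example_conditional_negate(s64 x, bool negate)
{
	s64 t = negate ? -1 : 0;	/* same role as the mask in div64_s64() */

	return (x ^ t) - t;
}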

#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
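
/*
 * Illustrative sketch (not part of the upstream file): __iter_div_u64_rem()
 * subtracts the divisor in a loop, so iter_div_u64_rem() only pays off when
 * the quotient is known to be tiny, e.g. normalizing a nanosecond count
 * that overshoots one second by at most a few seconds.  The helper name and
 * the open-coded constant are illustrative only.
 */
static inline u64 example_normalize_ns(u64 ns, u32 *extra_secs)
{
	u64 rem;

	*extra_secs = iter_div_u64_rem(ns, 1000000000, &rem);
	return rem;	/* leftover nanoseconds below one second */
}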

#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
	u64 res = 0, div, rem;
	int shift;

	/* Can a * b overflow? */
	if (ilog2(a) + ilog2(b) > 62) {
		/*
		 * (b * a) / c is equal to
		 *
		 *      (b / c) * a +
		 *      (b % c) * a / c
		 *
		 * if nothing overflows. Can the first multiplication
		 * overflow? Yes, but we do not care: this can only
		 * happen if the end result can't fit in u64 anyway.
		 *
		 * So the code below does
		 *
		 *      res = (b / c) * a;
		 *      b = b % c;
		 */
		div = div64_u64_rem(b, c, &rem);
		res = div * a;
		b = rem;

		shift = ilog2(a) + ilog2(b) - 62;
		if (shift > 0) {
			/* drop precision so that a * b fits in 64 bits */
			b >>= shift;
			c >>= shift;
			if (!c)
				return res;
		}
	}

	return res + div64_u64(a * b, c);
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
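
/*
 * Illustrative sketch (not part of the upstream file): mul_u64_u64_div_u64()
 * computes a * b / c while trying to keep the intermediate product from
 * overflowing (dropping precision only when unavoidable), which is handy
 * for rescaling, e.g. converting a tick count between two clock
 * frequencies.  Names are hypothetical.
 */
static inline u64 example_rescale_ticks(u64 ticks, u64 from_hz, u64 to_hz)
{
	if (!from_hz)
		return 0;	/* avoid dividing by zero */
	return mul_u64_u64_div_u64(ticks, to_hz, from_hz);
}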