/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <linux/math.h>
#include <vdso/math64.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs to supply an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
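
/*
 * Illustrative example (not part of the original header): the helper
 * returns the quotient and stores the remainder through the pointer, so
 *
 *	u32 rem;
 *	u64 q = div_u64_rem(1000000007ULL, 10, &rem);
 *
 * leaves q == 100000000 and rem == 7.
 */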

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
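
/*
 * Illustrative note (not from the original header): as in C division, the
 * quotient truncates toward zero and the remainder takes the sign of the
 * dividend, e.g.
 *
 *	s32 rem;
 *	s64 q = div_s64_rem(-7, 2, &rem);
 *
 * leaves q == -3 and rem == -1.
 */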

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif
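
/*
 * Illustrative note (not from the original header): the 32-bit fallback
 * above relies on do_div() from asm/div64.h, which divides in place --
 * the macro replaces its first argument with the quotient and evaluates
 * to the remainder. Assuming the usual do_div() contract,
 *
 *	u64 n = 1000000007ULL;
 *	u32 rem = do_div(n, 10);
 *
 * leaves n == 100000000 and rem == 7.
 */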

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 *
 * Return: dividend / divisor
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
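
/*
 * Illustrative example (not from the original header): div_u64() is the
 * variant to reach for whenever the divisor fits in 32 bits, e.g.
 * converting nanoseconds to milliseconds (NSEC_PER_MSEC is assumed to be
 * available via linux/time64.h):
 *
 *	u64 msecs = div_u64(some_ns, NSEC_PER_MSEC);
 */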

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif
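
/*
 * Illustrative note (not from the original header): mul_u32_u32() keeps
 * the widening 32x32->64 multiply a single-width operation on compilers
 * that would otherwise emit a full 64x64 multiply; the #ifndef guard
 * lets an architecture provide its own version. Plain usage is
 *
 *	u64 prod = mul_u32_u32(0xffffffffU, 0xffffffffU);
 *
 * which yields 0xfffffffe00000001.
 */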

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
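
/*
 * Illustrative example (not from the original header): mul_u64_u32_shr()
 * is the usual building block for fixed-point scaling, i.e. computing
 * a * (mult / 2^shift) without losing the high bits of the intermediate
 * product. A clocksource-style cycles-to-nanoseconds conversion (mult and
 * shift being hypothetical calibration values here) looks like
 *
 *	u64 ns = mul_u64_u32_shr(cycles, mult, shift);
 */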

#ifndef mul_u64_u64_shr
static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
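
/*
 * Illustrative note (not from the original header): on this non-__int128
 * path the high-word contribution is added back with a left shift by
 * (32 - shift), so a shift above 32 would turn into a negative shift
 * count, which is undefined in C. Callers on this path therefore appear
 * to be expected to keep shift <= 32 when the upper half of @a can be
 * non-zero.
 */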

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
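
/*
 * Illustrative note (not from the original header): the open-coded
 * variant above is schoolbook multiplication on 32-bit digits. With
 * a = ah*2^32 + al and b = bh*2^32 + bl,
 *
 *	a * b = ah*bh*2^64 + (ah*bl + al*bh)*2^32 + al*bl
 *
 * rl, rm, rn and rh hold the four partial products, the two carry steps
 * fold the middle terms into bits 32..127, and the final shift extracts
 * the requested 64-bit window of the 128-bit product.
 */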

#endif

#ifndef mul_s64_u64_shr
static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
{
	u64 ret;

	/*
	 * Extract the sign before the multiplication and put it back
	 * afterwards if needed.
	 */
	ret = mul_u64_u64_shr(abs(a), b, shift);

	if (a < 0)
		ret = -((s64) ret);

	return ret;
}
#endif /* mul_s64_u64_shr */
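
/*
 * Illustrative example (not from the original header): the sign is peeled
 * off the signed operand, the unsigned multiply-shift runs on |a|, and the
 * sign is reapplied, so
 *
 *	mul_s64_u64_shr(-16, 3, 2)
 *
 * computes 16 * 3 >> 2 = 12 and returns -12 (as a u64 holding the
 * two's-complement bit pattern of the negative value).
 */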

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
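
/*
 * Illustrative note (not from the original header): mul_u64_u32_div()
 * computes a * mul / divisor while keeping the full 96-bit intermediate
 * product, so the multiply cannot overflow; only the final quotient is
 * assumed to fit in 64 bits. A typical rational scaling (freq_hz being a
 * hypothetical clock rate here) is
 *
 *	u64 ns = mul_u64_u32_div(cycles, NSEC_PER_SEC, freq_hz);
 */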

u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);

/**
 * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round up.
 *
 * Return: dividend / divisor rounded up
 */
#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })

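/*
 * Illustrative example (not from the original header): the temporary _tmp
 * keeps the divisor from being evaluated twice, and the (d - 1) bias
 * implements ceiling division, e.g.
 *
 *	DIV64_U64_ROUND_UP(10, 3)	yields 4
 *	DIV64_U64_ROUND_UP(9, 3)	yields 3
 */
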
/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })

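/*
 * Illustrative example (not from the original header): the +divisor/2
 * bias rounds halves up for the unsigned variants, e.g.
 *
 *	DIV64_U64_ROUND_CLOSEST(7, 2)	yields 4
 *	DIV64_U64_ROUND_CLOSEST(6, 4)	yields 2
 */
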
/**
 * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })

/**
 * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Divide signed 64bit dividend by signed 32bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV_S64_ROUND_CLOSEST(dividend, divisor)(	\
{							\
	s64 __x = (dividend);				\
	s32 __d = (divisor);				\
	((__x > 0) == (__d > 0)) ?			\
		div_s64((__x + (__d / 2)), __d) :	\
		div_s64((__x - (__d / 2)), __d);	\
}							\
)
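
/*
 * Illustrative example (not from the original header): the bias carries
 * the divisor's sign, so halves round away from zero for either sign of
 * the operands, e.g.
 *
 *	DIV_S64_ROUND_CLOSEST(7, 2)	yields 4
 *	DIV_S64_ROUND_CLOSEST(-7, 2)	yields -4
 *
 * whereas plain truncating division would give 3 and -3.
 */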
#endif /* _LINUX_MATH64_H */