/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div() are, in C++ notation (observing that the
 * name is a function-like macro and the n parameter has the semantics
 * of a C++ reference):
 *
 * uint32_t do_div(uint64_t &n, uint32_t base)
 * {
 *	uint32_t remainder = n % base;
 *	n = n / base;
 *	return remainder;
 * }
 *
 * NOTE: macro parameter n is evaluated multiple times,
 * beware of side effects!
 */

#include <linux/types.h>
#include <linux/compiler.h>

#if BITS_PER_LONG == 64

/**
 * do_div - update the 64-bit dividend in place and return the remainder
 * @n: uint64_t dividend (will be updated)
 * @base: uint32_t divisor
 *
 * Summary:
 * ``uint32_t remainder = n % base;``
 * ``n = n / base;``
 *
 * Return: (uint32_t)remainder
 *
 * NOTE: macro parameter @n is evaluated multiple times,
 * beware of side effects!
 */
# define do_div(n, base) ({					\
	uint32_t __base = (base);				\
	uint32_t __rem;						\
	__rem = ((uint64_t)(n)) % __base;			\
	(n) = ((uint64_t)(n)) / __base;				\
	__rem;							\
})

#elif BITS_PER_LONG == 32

#include <linux/log2.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications which ought to be much faster. And yet only if compiling
 * with a sufficiently recent gcc version to perform proper 64-bit constant
 * propagation.
 *
 * (It is unfortunate that gcc doesn't perform all this internally.)
 */

#ifndef __div64_const32_is_OK
#define __div64_const32_is_OK (__GNUC__ >= 4)
#endif

#define __div64_const32(n, ___b)					\
({									\
	/*								\
	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
	 *								\
	 * We rely on the fact that most of this code gets optimized	\
	 * away at compile time due to constant propagation and only	\
	 * a few multiplication instructions should remain.		\
	 * Hence this monstrous macro (static inline doesn't always	\
	 * do the trick here).						\
	 */								\
	uint64_t ___res, ___x, ___t, ___m, ___n = (n);			\
	uint32_t ___p, ___bias;						\
									\
	/* determine MSB of b */					\
	___p = 1 << ilog2(___b);					\
									\
	/* compute m = ((p << 64) + b - 1) / b */			\
	___m = (~0ULL / ___b) * ___p;					\
	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
									\
	/* one less than the dividend with highest result */		\
	___x = ~0ULL / ___b * ___b - 1;					\
									\
	/* test our ___m with res = m * x / (p << 64) */		\
	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
	___t = ___res += (___m & 0xffffffff) * (___x >> 32);		\
	___res += (___x & 0xffffffff) * (___m >> 32);			\
	___t = (___res < ___t) ? (1ULL << 32) : 0;			\
	___res = (___res >> 32) + ___t;					\
	___res += (___m >> 32) * (___x >> 32);				\
	___res /= ___p;							\
									\
	/* Now sanitize and optimize what we've got. */			\
	if (~0ULL % (___b / (___b & -___b)) == 0) {			\
		/* special case, can be simplified to ... */		\
		___n /= (___b & -___b);					\
		___m = ~0ULL / (___b / (___b & -___b));			\
		___p = 1;						\
		___bias = 1;						\
	} else if (___res != ___x / ___b) {				\
		/*							\
		 * We can't get away without a bias to compensate	\
		 * for bit truncation errors. To avoid it we'd need an	\
		 * additional bit to represent m which would overflow	\
		 * a 64-bit variable.					\
		 *							\
		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
		 */							\
		___bias = 1;						\
		/* Compute m = (p << 64) / b */				\
		___m = (~0ULL / ___b) * ___p;				\
		___m += ((~0ULL % ___b + 1) * ___p) / ___b;		\
	} else {							\
		/*							\
		 * Reduce m / p, and try to clear bit 31 of m when	\
		 * possible, otherwise that'll need extra overflow	\
		 * handling later.					\
		 */							\
		uint32_t ___bits = -(___m & -___m);			\
		___bits |= ___m >> 32;					\
		___bits = (~___bits) << 1;				\
		/*							\
		 * If ___bits == 0 then setting bit 31 is unavoidable.	\
		 * Simply apply the maximum possible reduction in that	\
		 * case. Otherwise the MSB of ___bits indicates the	\
		 * best reduction we should apply.			\
		 */							\
		if (!___bits) {						\
			___p /= (___m & -___m);				\
			___m /= (___m & -___m);				\
		} else {						\
			___p >>= ilog2(___bits);			\
			___m >>= ilog2(___bits);			\
		}							\
		/* No bias needed. */					\
		___bias = 0;						\
	}								\
									\
	/*								\
	 * Now we have a combination of 2 conditions:			\
	 *								\
	 * 1) whether or not we need to apply a bias, and		\
	 *								\
	 * 2) whether or not there might be an overflow in the cross	\
	 *    product determined by (___m & ((1 << 63) | (1 << 31))).	\
	 *								\
	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
	 * From now on there will be actual runtime code generated.	\
	 */								\
	___res = __arch_xprod_64(___m, ___n, ___bias);			\
									\
	___res /= ___p;							\
})
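/*
 * A scaled-down sketch of the reciprocal trick above, small enough to
 * check by hand (the helper name is hypothetical and not part of this
 * header's interface).  At 8-bit precision with b = 10 we get
 * p = 1 << ilog2(10) = 8 and m = ((p << 8) + b - 1) / b = 205, so
 * n / 10 == ((m * n) >> 8) / p for any 8-bit n.  For example, n = 99:
 * (205 * 99) >> 8 = 79 and 79 / 8 = 9.  The macro performs the same
 * computation at 64-bit scale, where m is a 64-bit reciprocal.
 */
static inline uint32_t __div64_const32_demo_div10(uint32_t n)
{
	const uint32_t m = 205;	/* ((8 << 8) + 9) / 10 */
	const uint32_t p = 8;	/* 1 << ilog2(10) */

	/* exact for all n <= 255 */
	return ((m * n) >> 8) / p;
}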
#ifndef __arch_xprod_64
/*
 * Default C implementation for __arch_xprod_64()
 *
 * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
 * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
 *
 * The product is a 128-bit value, scaled down to 64 bits.
 * Assuming constant propagation to optimize away unused conditional code.
 * Architectures may provide their own optimized assembly implementation.
 */
static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
	uint32_t m_lo = m;
	uint32_t m_hi = m >> 32;
	uint32_t n_lo = n;
	uint32_t n_hi = n >> 32;
	uint64_t res;
	uint32_t res_lo, res_hi, tmp;

	if (!bias) {
		res = ((uint64_t)m_lo * n_lo) >> 32;
	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res = (m + (uint64_t)m_lo * n_lo) >> 32;
	} else {
		res = m + (uint64_t)m_lo * n_lo;
		res_lo = res >> 32;
		res_hi = (res_lo < m_hi);
		res = res_lo | ((uint64_t)res_hi << 32);
	}

	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res += (uint64_t)m_lo * n_hi;
		res += (uint64_t)m_hi * n_lo;
		res >>= 32;
	} else {
		res += (uint64_t)m_lo * n_hi;
		tmp = res >> 32;
		res += (uint64_t)m_hi * n_lo;
		res_lo = res >> 32;
		res_hi = (res_lo < tmp);
		res = res_lo | ((uint64_t)res_hi << 32);
	}

	res += (uint64_t)m_hi * n_hi;

	return res;
}
#endif
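/*
 * A cross-check sketch, assuming a compiler that predefines
 * __SIZEOF_INT128__ (gcc and clang do); the function name is
 * hypothetical.  With a native 128-bit type the helper above collapses
 * to a single expression, which documents the intended result.  The
 * portable version remains necessary because 32-bit targets cannot
 * rely on efficient 128-bit arithmetic.
 */
#ifdef __SIZEOF_INT128__
static inline uint64_t __arch_xprod_64_check(uint64_t m, uint64_t n, bool bias)
{
	/* retval = ((bias ? m : 0) + m * n) >> 64, computed directly */
	return (uint64_t)(((unsigned __int128)m * n + (bias ? m : 0)) >> 64);
}
#endif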
#ifndef __div64_32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
#endif

/*
 * The unnecessary pointer compare is there
 * to check for type safety (n must be 64-bit)
 */
# define do_div(n, base) ({					\
	uint32_t __base = (base);				\
	uint32_t __rem;						\
	(void)(((typeof((n)) *)0) == ((uint64_t *)0));		\
	if (__builtin_constant_p(__base) &&			\
	    is_power_of_2(__base)) {				\
		__rem = (n) & (__base - 1);			\
		(n) >>= ilog2(__base);				\
	} else if (__div64_const32_is_OK &&			\
		   __builtin_constant_p(__base) &&		\
		   __base != 0) {				\
		uint32_t __res_lo, __n_lo = (n);		\
		(n) = __div64_const32(n, __base);		\
		/* the remainder can be computed with 32-bit regs */ \
		__res_lo = (n);					\
		__rem = __n_lo - __res_lo * __base;		\
	} else if (likely(((n) >> 32) == 0)) {			\
		__rem = (uint32_t)(n) % __base;			\
		(n) = (uint32_t)(n) / __base;			\
	} else							\
		__rem = __div64_32(&(n), __base);		\
	__rem;							\
})

#else /* BITS_PER_LONG == ?? */

# error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

#endif /* _ASM_GENERIC_DIV64_H */
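/*
 * Usage sketch, illustrative only: do_div() updates the dividend in
 * place and returns the remainder, so splitting a 64-bit nanosecond
 * count into seconds plus leftover nanoseconds looks like:
 *
 *	uint64_t ns = 3000000123ULL;
 *	uint32_t rem = do_div(ns, 1000000000);
 *
 * afterwards ns == 3 (seconds) and rem == 123 (nanoseconds).  The
 * divisor must fit in 32 bits; for a full 64-bit divisor use
 * div64_u64() from <linux/math64.h> instead.
 */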