/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2007 Thiemo Seufer
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Portions of this work are licensed under the terms of the GNU GPL,
 * version 2 or later. See the COPYING file in the top-level directory.
 */

#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/bswap.h"
#include "qemu/int128.h"

#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

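/*
 * Illustrative usage sketch (not part of the original header): mulu64()
 * and muls64() return the full 128-bit product split across two 64-bit
 * words, so callers keep both halves:
 *
 *     uint64_t lo, hi;
 *     mulu64(&lo, &hi, UINT64_MAX, 2);
 *     // hi == 1, lo == UINT64_MAX - 1, i.e. hi:lo == 2 * (2^64 - 1)
 */
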
/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}

static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
    return ((__int128_t)a * b + c - 1) / c;
}

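/*
 * Worked example (illustrative, not part of the original header):
 * muldiv64() computes (a * b) / c with a wide intermediate so the product
 * cannot overflow 64 bits, which is why it is typically used for clock and
 * frequency scaling.  The values below are hypothetical:
 *
 *     // Scale a nanosecond count to ticks of a 3.579545 MHz clock.
 *     uint64_t ns    = 10000000000000ULL;                          // 10^13 ns
 *     uint64_t ticks = muldiv64(ns, 3579545, 1000000000);          // 35795450000
 *     uint64_t up    = muldiv64_round_up(ns, 3579545, 1000000000); // same here
 *     // A plain 64-bit "ns * 3579545" would already have wrapped.
 */
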
static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
                               uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}

static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
                              int64_t divisor)
{
    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
    __int128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}
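
/*
 * Illustrative sketch (not in the original header): divu128() divides the
 * 128-bit value held in *phigh:*plow by a 64-bit divisor, leaving the
 * quotient in the same two words and returning the remainder:
 *
 *     uint64_t lo = 1, hi = 1;            // dividend = 2^64 + 1
 *     uint64_t rem = divu128(&lo, &hi, 10);
 *     // hi == 0, lo == 1844674407370955161, rem == 7
 */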
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);

/*
 * Fallback for hosts without a 128-bit integer type: compute (a * b) / c
 * by splitting @a into 32-bit halves so every intermediate value fits in
 * 64 bits.
 */
static inline uint64_t muldiv64_rounding(uint64_t a, uint32_t b, uint32_t c,
                                         bool round_up)
{
    union {
        uint64_t ll;
        struct {
#if HOST_BIG_ENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    if (round_up) {
        rl += c - 1;
    }
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}

static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return muldiv64_rounding(a, b, c, false);
}

static inline uint64_t muldiv64_round_up(uint64_t a, uint32_t b, uint32_t c)
{
    return muldiv64_rounding(a, b, c, true);
}
#endif

/**
 * clz8 - count leading zeros in an 8-bit value.
 * @val: The value to search
 *
 * Returns 8 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 *
 * Note that the GCC builtin will upcast its argument to an `unsigned int`
 * so this function subtracts off the number of prepended zeroes.
 */
static inline int clz8(uint8_t val)
{
    return val ? __builtin_clz(val) - 24 : 8;
}

/**
 * clz16 - count leading zeros in a 16-bit value.
 * @val: The value to search
 *
 * Returns 16 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 *
 * Note that the GCC builtin will upcast its argument to an `unsigned int`
 * so this function subtracts off the number of prepended zeroes.
 */
static inline int clz16(uint16_t val)
{
    return val ? __builtin_clz(val) - 16 : 16;
}

/**
 * clz32 - count leading zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz32(uint32_t val)
{
    return val ? __builtin_clz(val) : 32;
}

/**
 * clo32 - count leading ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int clo32(uint32_t val)
{
    return clz32(~val);
}

/**
 * clz64 - count leading zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}

/**
 * clo64 - count leading ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int clo64(uint64_t val)
{
    return clz64(~val);
}

/**
 * ctz8 - count trailing zeros in an 8-bit value.
 * @val: The value to search
 *
 * Returns 8 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz8(uint8_t val)
{
    return val ? __builtin_ctz(val) : 8;
}

/**
 * ctz16 - count trailing zeros in a 16-bit value.
 * @val: The value to search
 *
 * Returns 16 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz16(uint16_t val)
{
    return val ? __builtin_ctz(val) : 16;
}

/**
 * ctz32 - count trailing zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz32(uint32_t val)
{
    return val ? __builtin_ctz(val) : 32;
}

/**
 * cto32 - count trailing ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int cto32(uint32_t val)
{
    return ctz32(~val);
}

/**
 * ctz64 - count trailing zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero.  Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz64(uint64_t val)
{
    return val ? __builtin_ctzll(val) : 64;
}

/**
 * cto64 - count trailing ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int cto64(uint64_t val)
{
    return ctz64(~val);
}

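/*
 * Illustrative examples (not in the original header) of the count
 * leading/trailing zero/one helpers above:
 *
 *     clz32(0x00010000) == 15;   clz32(0) == 32;
 *     ctz32(0x00010000) == 16;   ctz32(0) == 32;
 *     clo32(0xffff0000) == 16;   cto32(0x0000ffff) == 16;
 *     clz8(0x01) == 7;           ctz8(0x80) == 7;
 */
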
/**
 * clrsb32 - count leading redundant sign bits in a 32-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-31].
 */
static inline int clrsb32(uint32_t val)
{
#if __has_builtin(__builtin_clrsb) || !defined(__clang__)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}

/**
 * clrsb64 - count leading redundant sign bits in a 64-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-63].
 */
static inline int clrsb64(uint64_t val)
{
#if __has_builtin(__builtin_clrsbll) || !defined(__clang__)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}

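/*
 * Illustrative examples (not in the original header) for clrsb32/clrsb64:
 *
 *     clrsb32(0x00000000) == 31;   // sign bit 0, 31 copies follow
 *     clrsb32(0xffffffff) == 31;   // sign bit 1, 31 copies follow
 *     clrsb32(0x00000001) == 30;   // only the low bit differs
 *     clrsb64(0xffffffff00000000ull) == 31;
 */
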
/**
 * ctpop8 - count the population of one bits in an 8-bit value.
 * @val: The value to search
 */
static inline int ctpop8(uint8_t val)
{
    return __builtin_popcount(val);
}

/**
 * parity8 - return the parity (1 = odd) of an 8-bit value.
 * @val: The value to search
 */
static inline int parity8(uint8_t val)
{
    return __builtin_parity(val);
}

/**
 * ctpop16 - count the population of one bits in a 16-bit value.
 * @val: The value to search
 */
static inline int ctpop16(uint16_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop32 - count the population of one bits in a 32-bit value.
 * @val: The value to search
 */
static inline int ctpop32(uint32_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop64 - count the population of one bits in a 64-bit value.
 * @val: The value to search
 */
static inline int ctpop64(uint64_t val)
{
    return __builtin_popcountll(val);
}

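/*
 * Illustrative examples (not in the original header):
 *
 *     ctpop8(0xf0) == 4;    ctpop32(0xffffffff) == 32;
 *     parity8(0x03) == 0;   parity8(0x07) == 1;
 */
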
/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
#if __has_builtin(__builtin_bitreverse8)
    return __builtin_bitreverse8(x);
#else
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
#endif
}

/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
#if __has_builtin(__builtin_bitreverse16)
    return __builtin_bitreverse16(x);
#else
    /* Assign the correct byte position.  */
    x = bswap16(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
#endif
}

/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
#if __has_builtin(__builtin_bitreverse32)
    return __builtin_bitreverse32(x);
#else
    /* Assign the correct byte position.  */
    x = bswap32(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
#endif
}

/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
#if __has_builtin(__builtin_bitreverse64)
    return __builtin_bitreverse64(x);
#else
    /* Assign the correct byte position.  */
    x = bswap64(x);
    /* Assign the correct nibble position.  */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position.  */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
#endif
}

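/*
 * Illustrative examples (not in the original header) of bit reversal:
 *
 *     revbit8(0x01) == 0x80;
 *     revbit16(0x0001) == 0x8000;
 *     revbit32(0x0000000f) == 0xf0000000;
 *     revbit64(0x1ull) == 0x8000000000000000ull;
 */
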
/**
 * Return the absolute value of a 64-bit integer as an unsigned 64-bit value
 */
static inline uint64_t uabs64(int64_t v)
{
    return v < 0 ? -v : v;
}

/**
 * sadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * sadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * ssub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * ssub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * smul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * smul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

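/*
 * Illustrative sketch (not in the original header): the *_overflow helpers
 * always store the wrapped result and report whether truncation occurred,
 * so both the value and the flag are available to the caller:
 *
 *     uint32_t sum;
 *     if (uadd32_overflow(0xffffffffu, 1, &sum)) {
 *         // overflow: sum == 0 here
 *     }
 *
 *     int64_t prod;
 *     bool ovf = smul64_overflow(INT64_MAX, 2, &prod);  // ovf == true
 */
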
/*
 * Unsigned 128x64 multiplication.
 * The 128-bit multiplicand is passed in via plow/phigh and the low 128 bits
 * of the product are returned through the same pointers.
 * Returns true if the product did not fit in 128 bits (i.e. the stored
 * result was truncated); returns false otherwise.
 */
static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
#if defined(CONFIG_INT128)
    bool res;
    __uint128_t r;
    __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
    res = __builtin_mul_overflow(f, factor, &r);

    *plow = r;
    *phigh = r >> 64;

    return res;
#else
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t ahi;
    uint64_t blo, bhi;

    if (dhi == 0) {
        mulu64(plow, phigh, dlo, factor);
        return false;
    }

    mulu64(plow, &ahi, dlo, factor);
    mulu64(&blo, &bhi, dhi, factor);

    return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
#endif
}

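/*
 * Illustrative sketch (not in the original header): mulu128() scales a
 * 128-bit value in place by a 64-bit factor:
 *
 *     uint64_t lo = 0, hi = 1;                 // value = 2^64
 *     bool truncated = mulu128(&lo, &hi, 3);   // value = 3 * 2^64
 *     // truncated == false, hi == 3, lo == 0
 */
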
/**
 * uadd64_carry - addition with carry-in and carry-out
 * @x, @y: addends
 * @pcarry: in-out carry value
 *
 * Computes @x + @y + *@pcarry, placing the carry-out back
 * into *@pcarry and returning the 64-bit sum.
 */
static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
{
#if __has_builtin(__builtin_addcll)
    unsigned long long c = *pcarry;
    x = __builtin_addcll(x, y, c, &c);
    *pcarry = c & 1;
    return x;
#else
    bool c = *pcarry;
    /* This is clang's internal expansion of __builtin_addc. */
    c = uadd64_overflow(x, c, &x);
    c |= uadd64_overflow(x, y, &x);
    *pcarry = c;
    return x;
#endif
}

/**
 * usub64_borrow - subtraction with borrow-in and borrow-out
 * @x: Minuend
 * @y: Subtrahend
 * @pborrow: in-out borrow value
 *
 * Computes @x - @y - *@pborrow, placing the borrow-out back
 * into *@pborrow and returning the 64-bit difference.
 */
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN)
    unsigned long long b = *pborrow;
    x = __builtin_subcll(x, y, b, &b);
    *pborrow = b & 1;
    return x;
#else
    bool b = *pborrow;
    b = usub64_overflow(x, b, &x);
    b |= usub64_overflow(x, y, &x);
    *pborrow = b;
    return x;
#endif
}

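/*
 * Illustrative sketch (not in the original header): uadd64_carry() and
 * usub64_borrow() are meant for limb-by-limb wide arithmetic, threading
 * the carry/borrow through successive words.  The limbs al/ah and bl/bh
 * below are hypothetical caller variables:
 *
 *     // 128-bit addition: (ah:al) += (bh:bl)
 *     bool carry = false;
 *     al = uadd64_carry(al, bl, &carry);
 *     ah = uadd64_carry(ah, bh, &carry);
 *     // 'carry' now holds the carry out of bit 127.
 */
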
/* Host type specific sizes of these routines.  */

#if ULONG_MAX == UINT32_MAX
# define clzl   clz32
# define ctzl   ctz32
# define clol   clo32
# define ctol   cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl   clz64
# define ctzl   ctz64
# define clol   clo64
# define ctol   cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif

static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}

/**
 * Return @value rounded down to the nearest power of two or zero.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        /* Avoid undefined shift by 64 */
        return 0;
    }
    return 0x8000000000000000ull >> clz64(value);
}

/*
 * Return @value rounded up to the nearest power of two modulo 2^64.
 * This is *zero* for @value > 2^63, so be careful.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /*
         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
         * Therefore, either @value == 0 or @value > 2^63.
         * If it's 0, return 1, else return 0.
         */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}

static inline uint32_t pow2roundup32(uint32_t x)
{
    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
    return x + 1;
}

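/*
 * Illustrative examples (not in the original header):
 *
 *     is_power_of_2(0) == false;   is_power_of_2(64) == true;
 *     pow2floor(5) == 4;           pow2floor(8) == 8;
 *     pow2ceil(5)  == 8;           pow2ceil(0)  == 1;
 *     pow2roundup32(5) == 8;       // next power of two above 5
 */
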
/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * taken modulo 128. In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * taken modulo 128. In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);

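/*
 * Illustrative sketch (not in the original header): shifting a 128-bit
 * quantity held in two 64-bit words:
 *
 *     uint64_t lo = 0, hi = 1;          // value = 2^64
 *     urshift(&lo, &hi, 4);             // value = 2^60: hi == 0, lo == 1ull << 60
 *
 *     bool overflow = false;
 *     ulshift(&lo, &hi, 68, &overflow); // 2^60 << 68 does not fit in 128 bits
 *     // overflow is now true because a 1-bit was shifted out.
 */
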
/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
 * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
 *
 * Licensed under the GPLv2/LGPLv3
 */
static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
                                  uint64_t n0, uint64_t d)
{
#if defined(__x86_64__)
    uint64_t q;
    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
    return q;
#elif defined(__s390x__) && !defined(__clang__)
    /* Need to use a TImode type to get an even register pair for DLGR.  */
    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
    *r = n >> 64;
    return n;
#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
    /* From Power ISA 2.06, programming note for divdeu.  */
    uint64_t q1, q2, Q, r1, r2, R;
    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
        : "=&r"(q1), "=r"(q2)
        : "r"(n1), "r"(n0), "r"(d));
    r1 = -(q1 * d);         /* low part of (n1<<64) - (q1 * d) */
    r2 = n0 - (q2 * d);
    Q = q1 + q2;
    R = r1 + r2;
    if (R >= d || R < r2) { /* overflow implies R > d */
        Q += 1;
        R -= d;
    }
    *r = R;
    return Q;
#else
    uint64_t d0, d1, q0, q1, r1, r0, m;

    d0 = (uint32_t)d;
    d1 = d >> 32;

    r1 = n1 % d1;
    q1 = n1 / d1;
    m = q1 * d0;
    r1 = (r1 << 32) | (n0 >> 32);
    if (r1 < m) {
        q1 -= 1;
        r1 += d;
        if (r1 >= d) {
            if (r1 < m) {
                q1 -= 1;
                r1 += d;
            }
        }
    }
    r1 -= m;

    r0 = r1 % d1;
    q0 = r1 / d1;
    m = q0 * d0;
    r0 = (r0 << 32) | (uint32_t)n0;
    if (r0 < m) {
        q0 -= 1;
        r0 += d;
        if (r0 >= d) {
            if (r0 < m) {
                q0 -= 1;
                r0 += d;
            }
        }
    }
    r0 -= m;

    *r = r0;
    return (q1 << 32) | q0;
#endif
}

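/*
 * Illustrative sketch (not in the original header): udiv_qrnnd() divides
 * the 128-bit value n1:n0 by d, returning the 64-bit quotient and storing
 * the remainder in *r.  The caller is expected to ensure n1 < d so the
 * quotient fits in 64 bits (and, for the generic fallback above, that d is
 * suitably large/normalized):
 *
 *     uint64_t rem;
 *     uint64_t q = udiv_qrnnd(&rem, 1, 5, 1ull << 63);
 *     // dividend = 2^64 + 5, divisor = 2^63: q == 2, rem == 5
 */
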
Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);
#endif