xref: /openbmc/qemu/util/host-utils.c (revision 2e1cacfb)
1 /*
2  * Utility compute operations used by translated code.
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2007 Aurelien Jarno
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 
26 #include "qemu/osdep.h"
27 #include "qemu/host-utils.h"
28 
29 #ifndef CONFIG_INT128
30 /* Long integer helpers */
/*
 * Portable unsigned 64x64 -> 128 bit multiply, built from four
 * 32x32 -> 64 bit partial products.  Used when the compiler has no
 * native 128-bit integer type (CONFIG_INT128 unset).
 */
static inline void mul64(uint64_t *plow, uint64_t *phigh,
                         uint64_t a, uint64_t b)
{
    uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
    uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);
    uint64_t p0, p1, p2, p3, mid;

    p0 = (uint64_t)al * bl;     /* contributes to bits   0..63  */
    p1 = (uint64_t)al * bh;     /* contributes to bits  32..95  */
    p2 = (uint64_t)ah * bl;     /* contributes to bits  32..95  */
    p3 = (uint64_t)ah * bh;     /* contributes to bits  64..127 */

    /*
     * Sum the three terms that land in bits 32..63; 'mid' cannot
     * overflow 64 bits (at most 3 * (2^32 - 1) plus a 32-bit carry).
     */
    mid = (p0 >> 32) + (uint32_t)p1 + (uint32_t)p2;

    *plow = (mid << 32) | (uint32_t)p0;
    /* Fold the high halves of the cross products and mid's carry. */
    *phigh = p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}
65 
/* Unsigned 64x64 -> 128 multiplication; result in *plow (low), *phigh (high). */
void mulu64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    /* Unsigned operands need no sign fixup; the core helper suffices. */
    mul64(plow, phigh, a, b);
}
71 
/* Signed 64x64 -> 128 multiplication; result in *plow (low), *phigh (high). */
void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    uint64_t high;

    /*
     * Compute the unsigned product first.  Interpreting a negative
     * operand as unsigned adds 2^64 to it, which over-counts the high
     * half by exactly the other operand; subtract it back out.
     */
    mul64(plow, &high, a, b);
    if (a < 0) {
        high -= b;
    }
    if (b < 0) {
        high -= a;
    }
    *phigh = high;
}
88 
/*
 * Unsigned 128-by-64 division.
 * The 128-bit dividend is passed in via *plow (low half) and *phigh
 * (high half); the 128-bit quotient is returned the same way.
 * The 64-bit remainder is the function's return value.
 *
 * NOTE(review): divisor == 0 deliberately falls into the dlo / divisor
 * path below, executing a host division by zero — presumably so the
 * host's trap behavior applies; confirm no caller expects a defined
 * result for divisor == 0.
 */
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t rem, dhighest;
    int sh;

    if (divisor == 0 || dhi == 0) {
        /* Plain 64/64 division suffices (or intentionally traps on 0). */
        *plow  = dlo / divisor;
        *phigh = 0;
        return dlo % divisor;
    } else {
        /* Leading zeros of the divisor: shift needed to normalize it. */
        sh = clz64(divisor);

        if (dhi < divisor) {
            /* Quotient fits in 64 bits; high word is zero. */
            if (sh != 0) {
                /* normalize the divisor, shifting the dividend accordingly */
                divisor <<= sh;
                dhi = (dhi << sh) | (dlo >> (64 - sh));
                dlo <<= sh;
            }

            *phigh = 0;
            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
        } else {
            if (sh != 0) {
                /* normalize the divisor, shifting the dividend accordingly */
                divisor <<= sh;
                /* Bits shifted out of dhi become a third dividend word. */
                dhighest = dhi >> (64 - sh);
                dhi = (dhi << sh) | (dlo >> (64 - sh));
                dlo <<= sh;

                /* High quotient word; dhi becomes the partial remainder. */
                *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor);
            } else {
                /**
                 * dhi >= divisor
                 * Since the MSB of divisor is set (sh == 0),
                 * (dhi - divisor) < divisor
                 *
                 * Thus, the high part of the quotient is 1, and we can
                 * calculate the low part with a single call to udiv_qrnnd
                 * after subtracting divisor from dhi
                 */
                dhi -= divisor;
                *phigh = 1;
            }

            /* Low quotient word; rem receives the normalized remainder. */
            *plow = udiv_qrnnd(&rem, dhi, dlo, divisor);
        }

        /*
         * since the dividend/divisor might have been normalized,
         * the remainder might also have to be shifted back
         */
        return rem >> sh;
    }
}
152 
/*
 * Signed 128-by-64 division.
 * The dividend lives in *plow/*phigh and is replaced by the quotient;
 * the signed remainder is the function's return value.  The quotient
 * is truncated toward zero and the remainder takes the sign of the
 * dividend, matching C's signed-division convention.
 */
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor)
{
    uint64_t abs_lo = *plow;
    uint64_t abs_hi = *phigh;
    bool flip_quotient = false;
    bool flip_remainder = false;
    uint64_t rem;

    if (*phigh < 0) {
        /* Two's-complement negate of the 128-bit dividend. */
        flip_quotient = true;
        flip_remainder = true;
        if (abs_lo == 0) {
            abs_hi = -abs_hi;
        } else {
            abs_hi = ~abs_hi;
            abs_lo = -abs_lo;
        }
    }

    if (divisor < 0) {
        /* Negative divisor flips the quotient sign only. */
        flip_quotient = !flip_quotient;
        divisor = -divisor;
    }

    rem = divu128(&abs_lo, &abs_hi, (uint64_t)divisor);

    if (flip_quotient) {
        /* Negate the unsigned 128-bit quotient back to signed form. */
        if (abs_lo == 0) {
            *phigh = -abs_hi;
            *plow = 0;
        } else {
            *phigh = ~abs_hi;
            *plow = -abs_lo;
        }
    } else {
        *phigh = abs_hi;
        *plow = abs_lo;
    }

    return flip_remainder ? -rem : rem;
}
203 #endif
204 
/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit half.
 * @phigh: in/out - upper 64-bit half.
 * @shift: in - bits to shift, reduced modulo 128.
 *
 * Shifts the 128-bit value held in plow/phigh right by @shift bits,
 * zero-filling from the top.  Out-of-range shift counts are taken
 * modulo 128 rather than rejected; validating the count and the
 * pointers remains the caller's responsibility.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift)
{
    uint64_t lo = *plow;
    uint64_t hi = *phigh;
    int n = shift & 127;

    if (n == 0) {
        return;
    }

    if (n < 64) {
        /* Bits leaving the high half enter the top of the low half. */
        *plow = (lo >> n) | (hi << (64 - n));
        *phigh = hi >> n;
    } else {
        /* The low half is discarded entirely. */
        *plow = hi >> (n - 64);
        *phigh = 0;
    }
}
232 
/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit half.
 * @phigh: in/out - upper 64-bit half.
 * @shift: in - bits to shift, reduced modulo 128.
 * @overflow: out - set to true if any 1-bit is shifted out; never
 *            cleared here, so callers can accumulate the flag across
 *            several shifts (initialize it before the first call).
 *
 * Shifts the 128-bit value held in plow/phigh left by @shift bits,
 * zero-filling from the bottom.  Out-of-range shift counts are taken
 * modulo 128; validating the count and the pointers remains the
 * caller's responsibility.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow)
{
    uint64_t dropped_lo = *plow;
    uint64_t dropped_hi = *phigh;
    int n = shift & 127;

    if (n == 0) {
        return;
    }

    /* The bits that fall off the top are exactly the value >> (128 - n). */
    urshift(&dropped_lo, &dropped_hi, 128 - n);
    if (dropped_lo | dropped_hi) {
        *overflow = true;
    }

    if (n < 64) {
        /* Bits leaving the low half enter the bottom of the high half. */
        *phigh = (*phigh << n) | (*plow >> (64 - n));
        *plow <<= n;
    } else {
        /* The high half is replaced entirely. */
        *phigh = *plow << (n - 64);
        *plow = 0;
    }
}
269 
/*
 * Unsigned 256-by-128 division step.
 * Divides the 256-bit value n1:n0 (n1 is the high half) by d and
 * returns the low 128 bits of the quotient; the 128-bit remainder is
 * stored through @r.
 * Needs a normalized divisor (most significant bit set to 1).
 *
 * Adapted from include/qemu/host-utils.h udiv_qrnnd,
 * from the GNU Multi Precision Library - longlong.h __udiv_qrnnd
 * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
 *
 * Licensed under the GPLv2/LGPLv3
 */
static Int128 udiv256_qrnnd(Int128 *r, Int128 n1, Int128 n0, Int128 d)
{
    Int128 d0, d1, q0, q1, r1, r0, m;
    uint64_t mp0, mp1;

    /* Split the divisor into two 64-bit digits: d = d1:d0. */
    d0 = int128_make64(int128_getlo(d));
    d1 = int128_make64(int128_gethi(d));

    /* First quotient digit: estimate q1 from the high parts. */
    r1 = int128_remu(n1, d1);
    q1 = int128_divu(n1, d1);
    mp0 = int128_getlo(q1);
    mp1 = int128_gethi(q1);
    /* m = q1 * d0, used to check (and correct) the estimate. */
    mulu128(&mp0, &mp1, int128_getlo(d0));
    m = int128_make128(mp0, mp1);
    /* Partial remainder: (r1 << 64) | next 64-bit digit of n0. */
    r1 = int128_make128(int128_gethi(n0), int128_getlo(r1));
    if (int128_ult(r1, m)) {
        /* Estimate was too large; decrement q1 and add d back. */
        q1 = int128_sub(q1, int128_one());
        r1 = int128_add(r1, d);
        /* r1 + d wrapping past 2^128 means q1 is now exact. */
        if (int128_uge(r1, d)) {
            if (int128_ult(r1, m)) {
                /* Estimate can be off by at most two; correct again. */
                q1 = int128_sub(q1, int128_one());
                r1 = int128_add(r1, d);
            }
        }
    }
    r1 = int128_sub(r1, m);

    /* Second quotient digit: same estimate-and-correct sequence. */
    r0 = int128_remu(r1, d1);
    q0 = int128_divu(r1, d1);
    mp0 = int128_getlo(q0);
    mp1 = int128_gethi(q0);
    mulu128(&mp0, &mp1, int128_getlo(d0));
    m = int128_make128(mp0, mp1);
    /* Partial remainder: (r0 << 64) | low 64-bit digit of n0. */
    r0 = int128_make128(int128_getlo(n0), int128_getlo(r0));
    if (int128_ult(r0, m)) {
        q0 = int128_sub(q0, int128_one());
        r0 = int128_add(r0, d);
        if (int128_uge(r0, d)) {
            if (int128_ult(r0, m)) {
                q0 = int128_sub(q0, int128_one());
                r0 = int128_add(r0, d);
            }
        }
    }
    r0 = int128_sub(r0, m);

    *r = r0;
    /* Assemble the 128-bit quotient from its two 64-bit digits. */
    return int128_or(int128_lshift(q1, 64), q0);
}
331 
/*
 * Unsigned 256-by-128 division.
 * The 256-bit dividend is passed in via *plow (low half) and *phigh
 * (high half); the 256-bit quotient is returned the same way.
 * The 128-bit remainder is the function's return value.
 *
 * Structure mirrors divu128 above, widened to Int128 digits.
 * NOTE(review): a zero divisor reaches int128_divu(dlo, 0) below —
 * presumably relying on host divide-by-zero behavior, as in divu128;
 * confirm callers never depend on a defined result.
 */
Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor)
{
    Int128 dhi = *phigh;
    Int128 dlo = *plow;
    Int128 rem, dhighest;
    int sh;

    if (!int128_nz(divisor) || !int128_nz(dhi)) {
        /* Plain 128/128 division suffices. */
        *plow  = int128_divu(dlo, divisor);
        *phigh = int128_zero();
        return int128_remu(dlo, divisor);
    } else {
        /* Leading zeros of the divisor: shift needed to normalize it. */
        sh = clz128(divisor);

        if (int128_ult(dhi, divisor)) {
            /* Quotient fits in 128 bits; high half is zero. */
            if (sh != 0) {
                /* normalize the divisor, shifting the dividend accordingly */
                divisor = int128_lshift(divisor, sh);
                dhi = int128_or(int128_lshift(dhi, sh),
                                int128_urshift(dlo, (128 - sh)));
                dlo = int128_lshift(dlo, sh);
            }

            *phigh = int128_zero();
            *plow = udiv256_qrnnd(&rem, dhi, dlo, divisor);
        } else {
            if (sh != 0) {
                /* normalize the divisor, shifting the dividend accordingly */
                divisor = int128_lshift(divisor, sh);
                /* Bits shifted out of dhi become a third dividend word. */
                dhighest = int128_rshift(dhi, (128 - sh));
                dhi = int128_or(int128_lshift(dhi, sh),
                                int128_urshift(dlo, (128 - sh)));
                dlo = int128_lshift(dlo, sh);

                /* High quotient half; dhi becomes the partial remainder. */
                *phigh = udiv256_qrnnd(&dhi, dhighest, dhi, divisor);
            } else {
                /*
                 * dhi >= divisor
                 * Since the MSB of divisor is set (sh == 0),
                 * (dhi - divisor) < divisor
                 *
                 * Thus, the high part of the quotient is 1, and we can
                 * calculate the low part with a single call to udiv_qrnnd
                 * after subtracting divisor from dhi
                 */
                dhi = int128_sub(dhi, divisor);
                *phigh = int128_one();
            }

            /* Low quotient half; rem receives the normalized remainder. */
            *plow = udiv256_qrnnd(&rem, dhi, dlo, divisor);
        }

        /*
         * since the dividend/divisor might have been normalized,
         * the remainder might also have to be shifted back
         */
        rem = int128_urshift(rem, sh);
        return rem;
    }
}
398 
/*
 * Signed 256-by-128 division.
 * The dividend lives in *plow/*phigh and is replaced by the quotient;
 * the signed 128-bit remainder is the function's return value.
 * The quotient is truncated toward zero and the remainder takes the
 * sign of the dividend, matching C's signed-division convention.
 * Structure mirrors divs128 above, widened to Int128.
 */
Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor)
{
    bool neg_quotient = false, neg_remainder = false;
    Int128 unsig_hi = *phigh, unsig_lo = *plow;
    Int128 rem;

    if (!int128_nonneg(*phigh)) {
        /* Negative dividend flips both result signs. */
        neg_quotient = !neg_quotient;
        neg_remainder = !neg_remainder;

        /* Two's-complement negate of the 256-bit dividend. */
        if (!int128_nz(unsig_lo)) {
            unsig_hi = int128_neg(unsig_hi);
        } else {
            unsig_hi = int128_not(unsig_hi);
            unsig_lo = int128_neg(unsig_lo);
        }
    }

    if (!int128_nonneg(divisor)) {
        /* Negative divisor flips the quotient sign only. */
        neg_quotient = !neg_quotient;

        divisor = int128_neg(divisor);
    }

    /* Divide the magnitudes. */
    rem = divu256(&unsig_lo, &unsig_hi, divisor);

    if (neg_quotient) {
        /* Negate the unsigned 256-bit quotient back to signed form. */
        if (!int128_nz(unsig_lo)) {
            *phigh = int128_neg(unsig_hi);
            *plow = int128_zero();
        } else {
            *phigh = int128_not(unsig_hi);
            *plow = int128_neg(unsig_lo);
        }
    } else {
        *phigh = unsig_hi;
        *plow = unsig_lo;
    }

    if (neg_remainder) {
        return int128_neg(rem);
    } else {
        return rem;
    }
}
448 }
449