/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"

/* HBitmaps provide an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time on most current architectures.
 *
 * When setting or clearing a range of m bits, the work to perform across
 * all levels is O(m + m/W + m/W^2 + ...), with W = BITS_PER_LONG, which is
 * O(m) like on a regular bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
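
/* For example (64-bit case, illustrative only), bit index 300 decomposes as:
 *
 *   last level:     word 300 >> 6 = 4,  bit 300 & 63 = 44
 *   2nd-last level: word   4 >> 6 = 0,  bit   4 & 63 = 4
 *
 * Bit 44 of word 4 in the last level is thus mirrored by bit 4 of word 0 in
 * the level above; moving between levels is just a 6-bit shift.
 */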

struct HBitmap {
    /*
     * Size of the bitmap, as requested in hbitmap_alloc or in hbitmap_truncate.
     */
    uint64_t orig_size;

    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * will actually represent a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};
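
/* As a concrete sizing example (illustrative, BITS_PER_LONG == 64): for a
 * bitmap with 2^20 bits in the bottom level, the allocation loop in
 * hbitmap_alloc() below yields, starting from the last level:
 *
 *    levels[HBITMAP_LEVELS - 1]:  2^20 / 64 = 16384 words
 *    one level up:                16384 / 64 =   256 words
 *    next:                          256 / 64 =     4 words
 *    all remaining levels:                         1 word each
 */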

/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
static unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit.  */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}

void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
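
/* A minimal iteration sketch (illustrative only): visit every dirty item
 * in a nonempty bitmap @hb, starting from item 0.
 *
 *     HBitmapIter hbi;
 *     int64_t item;
 *
 *     hbitmap_iter_init(&hbi, hb, 0);
 *     while ((item = hbitmap_iter_next(&hbi)) >= 0) {
 *         ... item is the first element of a dirty granularity group ...
 *     }
 */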

int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count)
{
    HBitmapIter hbi;
    int64_t first_dirty_off;
    uint64_t end;

    assert(start >= 0 && count >= 0);

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end = count > hb->orig_size - start ? hb->orig_size : start + count;

    hbitmap_iter_init(&hbi, hb, start);
    first_dirty_off = hbitmap_iter_next(&hbi);

    if (first_dirty_off < 0 || first_dirty_off >= end) {
        return -1;
    }

    return MAX(start, first_dirty_off);
}

int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset;
    uint64_t end_bit, sz;
    int64_t res;

    assert(start >= 0 && count >= 0);

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end_bit = count > hb->orig_size - start ?
                hb->size :
                ((start + count - 1) >> hb->granularity) + 1;
    sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;

    /* There may be some zero bits in @cur before @start.  We are not
     * interested in them, so set them.
     */
    start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

    res = (pos << BITS_PER_LEVEL) + ctzl(cur);
    if (res >= end_bit) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        return start;
    }

    return res;
}

bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
                             int64_t max_dirty_count,
                             int64_t *dirty_start, int64_t *dirty_count)
{
    int64_t next_zero;

    assert(start >= 0 && end >= 0 && max_dirty_count > 0);

    end = MIN(end, hb->orig_size);
    if (start >= end) {
        return false;
    }

    start = hbitmap_next_dirty(hb, start, end - start);
    if (start < 0) {
        return false;
    }

    end = start + MIN(end - start, max_dirty_count);

    next_zero = hbitmap_next_zero(hb, start, end - start);
    if (next_zero >= 0) {
        end = next_zero;
    }

    *dirty_start = start;
    *dirty_count = end - start;

    return true;
}

bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count,
                    int64_t *pnum)
{
    int64_t next_dirty, next_zero;

    assert(start >= 0);
    assert(count > 0);
    assert(start + count <= hb->orig_size);

    next_dirty = hbitmap_next_dirty(hb, start, count);
    if (next_dirty == -1) {
        *pnum = count;
        return false;
    }

    if (next_dirty > start) {
        *pnum = next_dirty - start;
        return false;
    }

    assert(next_dirty == start);

    next_zero = hbitmap_next_zero(hb, start, count);
    if (next_zero == -1) {
        *pnum = count;
        return true;
    }

    assert(next_zero > start);
    *pnum = next_zero - start;
    return true;
}
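
/* A worked example (illustrative, granularity 0): if the first items of
 * @hb are 0 0 1 1 1 0 0 0, then:
 *
 *     hbitmap_status(hb, 0, 8, &pnum)  ->  false, pnum = 2  (clean run)
 *     hbitmap_status(hb, 2, 6, &pnum)  ->  true,  pnum = 3  (dirty run)
 */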

bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/**
 * hbitmap_iter_next_word:
 * @hbi: HBitmapIter to operate on.
 * @p_cur: Location where to store the next non-zero word.
 *
 * Return the index of the next nonzero word that is set in @hbi's
 * associated HBitmap, and set *p_cur to the content of that word
 * (bits before the index that was passed to hbitmap_iter_init are
 * trimmed on the first call).  Return -1, and set *p_cur to zero,
 * if all remaining words are zero.
 */
static size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1];

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            *p_cur = 0;
            return -1;
        }
    }

    /* The next call will resume work from the next word.  */
    hbi->cur[HBITMAP_LEVELS - 1] = 0;
    *p_cur = cur;
    return hbi->pos;
}

/* Count the number of set bits between start and last, not accounting for
 * the granularity.  Also an example of how to use hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}
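
/* The mask arithmetic in hb_set_elem() selects bits start..last inclusive;
 * for example, with start = 2 and last = 5 within one word:
 *
 *     mask = (2UL << 5) - (1UL << 2) = 64 - 4 = 0b00111100
 *
 * Writing 2UL << last instead of 1UL << (last + 1) avoids an out-of-range
 * shift when last is BITS_PER_LONG - 1.
 */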

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    if (count == 0) {
        return;
    }

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;
    uint64_t gran = 1ULL << hb->granularity;

    if (count == 0) {
        return;
    }

    assert(QEMU_IS_ALIGNED(start, gran));
    assert(QEMU_IS_ALIGNED(count, gran) || (start + count == hb->orig_size));

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}

bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}

uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts. */
    return UINT64_C(64) << hb->granularity;
}
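
/* For example (illustrative only): a granularity-0 bitmap must be serialized
 * in chunks whose start and count are multiples of 64 items (one unsigned
 * long on 64-bit hosts), while granularity 10 raises the alignment to
 * 64 << 10 = 65536 items per serialized word.
 */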

/* Both the start and the chunk size must be aligned to the serialization
 * granularity; only the last chunk's size may be unaligned.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}
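
/* A round-trip sketch (illustrative only; @copy is a hypothetical bitmap
 * with the same size and granularity as @hb, and @start/@count respect the
 * alignment rules above):
 *
 *     uint64_t len = hbitmap_serialization_size(hb, start, count);
 *     uint8_t *buf = g_malloc(len);
 *
 *     hbitmap_serialize_part(hb, buf, start, count);
 *     hbitmap_deserialize_part(copy, buf, start, count, true);
 *     g_free(buf);
 */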

void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* restore levels starting from penultimate to zero level, assuming
     * that the last level is ok */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(size <= INT64_MAX);
    hb->orig_size = size;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
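
/* A minimal lifecycle sketch (illustrative only):
 *
 *     HBitmap *hb = hbitmap_alloc(1 << 20, 0);
 *
 *     hbitmap_set(hb, 4096, 512);
 *     assert(hbitmap_count(hb) == 512);
 *     assert(hbitmap_get(hb, 4096));
 *     hbitmap_reset(hb, 4096, 512);
 *     assert(hbitmap_empty(hb));
 *     hbitmap_free(hb);
 */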

void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    assert(size <= INT64_MAX);
    hb->orig_size = size;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants. This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_renew(unsigned long, hb->levels[i], size);
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}

bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
{
    return (a->orig_size == b->orig_size);
}

/**
 * hbitmap_sparse_merge: perform dst = dst | src.
 *
 * Works with differing granularities; best used when src is sparsely
 * populated.
 */
static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
{
    int64_t offset;
    int64_t count;

    for (offset = 0;
         hbitmap_next_dirty_area(src, offset, src->orig_size, INT64_MAX,
                                 &offset, &count);
         offset += count)
    {
        hbitmap_set(dst, offset, count);
    }
}

/**
 * Given HBitmaps A and B, let R := A (BITOR) B.
 * Bitmaps A and B will not be modified,
 *     except when bitmap R is an alias of A or B.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
{
    int i;
    uint64_t j;

    if (!hbitmap_can_merge(a, b) || !hbitmap_can_merge(a, result)) {
        return false;
    }
    assert(hbitmap_can_merge(b, result));

    if ((!hbitmap_count(a) && result == b) ||
        (!hbitmap_count(b) && result == a)) {
        return true;
    }

    if (!hbitmap_count(a) && !hbitmap_count(b)) {
        hbitmap_reset_all(result);
        return true;
    }

    if (a->granularity != b->granularity) {
        if ((a != result) && (b != result)) {
            hbitmap_reset_all(result);
        }
        if (a != result) {
            hbitmap_sparse_merge(result, a);
        }
        if (b != result) {
            hbitmap_sparse_merge(result, b);
        }
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    assert(a->size == b->size);
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
        }
    }

    /* Recompute the dirty count */
    result->count = hb_count_between(result, 0, result->size - 1);

    return true;
}

char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}
967