/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"

/* HBitmap provides an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level, it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time on most current architectures.
 *
 * When setting or clearing a range of m bits, the total work across all
 * levels is O(m + m/W + m/W^2 + ...), which is O(m), just as on a regular
 * bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m set bits is
 * the number of bits that are set across all levels.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */

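/* A worked example of the index arithmetic described above, on a 64-bit
 * host (BITS_PER_LEVEL == 6, so one word covers 64 bits of the level
 * below).  Take last-level bit index 200:
 *
 *     word in the last level:  200 >> 6 == 3
 *     bit within that word:    200 & 63 == 8
 *
 * One level up, word index 3 maps to bit 3 of word 0 (3 >> 6 == 0,
 * 3 & 63 == 3), which is set whenever word 3 of the last level is
 * nonzero.
 */
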
struct HBitmap {
    /*
     * Size of the bitmap, as requested in hbitmap_alloc or in hbitmap_truncate.
     */
    uint64_t orig_size;

    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * actually represents a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};

/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit.  */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}

void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
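
/* A minimal usage sketch of the iterator API above (illustrative only;
 * assumes "hb" is a valid HBitmap and process_item() is a hypothetical
 * consumer):
 *
 *     HBitmapIter hbi;
 *     int64_t item;
 *
 *     hbitmap_iter_init(&hbi, hb, 0);
 *     while ((item = hbitmap_iter_next(&hbi)) >= 0) {
 *         process_item(item);
 *     }
 *
 * Each returned value is the first item of a dirty granularity group,
 * already scaled back to item units by the iterator.
 */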

int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset;
    uint64_t end_bit, sz;
    int64_t res;

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end_bit = count > hb->orig_size - start ?
                hb->size :
                ((start + count - 1) >> hb->granularity) + 1;
    sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;

    /* There may be some zero bits in @cur before @start.  We are not
     * interested in them, so set them.
     */
    start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

    res = (pos << BITS_PER_LEVEL) + ctzl(cur);
    if (res >= end_bit) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        return start;
    }

    return res;
}

bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start,
                             uint64_t *count)
{
    HBitmapIter hbi;
    int64_t first_dirty_off, area_end;
    uint32_t granularity = 1UL << hb->granularity;
    uint64_t end;

    if (*start >= hb->orig_size || *count == 0) {
        return false;
    }

    end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count;

    hbitmap_iter_init(&hbi, hb, *start);
    first_dirty_off = hbitmap_iter_next(&hbi);

    if (first_dirty_off < 0 || first_dirty_off >= end) {
        return false;
    }

    if (first_dirty_off + granularity >= end) {
        area_end = end;
    } else {
        area_end = hbitmap_next_zero(hb, first_dirty_off + granularity,
                                     end - first_dirty_off - granularity);
        if (area_end < 0) {
            area_end = end;
        }
    }

    if (first_dirty_off > *start) {
        *start = first_dirty_off;
    }
    *count = area_end - *start;

    return true;
}
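
/* A sketch of visiting every dirty area with the helper above
 * (illustrative; hbitmap_sparse_merge() below uses the same pattern, and
 * handle_area() is a hypothetical consumer):
 *
 *     uint64_t offset = 0;
 *     uint64_t count = hb->orig_size;
 *
 *     while (hbitmap_next_dirty_area(hb, &offset, &count)) {
 *         handle_area(offset, count);
 *         offset += count;
 *         if (offset >= hb->orig_size) {
 *             break;
 *         }
 *         count = hb->orig_size - offset;
 *     }
 */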

bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/* Count the number of set bits between start and last, not accounting for
 * the granularity.  Also an example of how to use hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}
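
/* For example, with start == 2 and last == 5 in the same word, the mask
 * above is (2UL << 5) - (1UL << 2) == 64 - 4 == 0x3c, i.e. bits 2..5
 * inclusive.  Writing 2UL << last instead of 1UL << (last + 1) keeps the
 * shift amount in range even when last is BITS_PER_LONG - 1.
 */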

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    if (count == 0) {
        return;
    }

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;
    uint64_t gran = 1ULL << hb->granularity;

    if (count == 0) {
        return;
    }

    assert(QEMU_IS_ALIGNED(start, gran));
    assert(QEMU_IS_ALIGNED(count, gran) || (start + count == hb->orig_size));

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}
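
/* For example, with granularity 2 (groups of four items), resetting
 * start=8, count=12 satisfies the assertions above; start=8, count=9 is
 * only accepted when start + count == orig_size; and start=9, count=4
 * trips the start-alignment assertion.
 */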

void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}

bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}

uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts. */
    return UINT64_C(64) << hb->granularity;
}
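
/* For example, a bitmap with granularity 0 must be serialized in chunks
 * that start at multiples of 64 items; with granularity 3 the alignment
 * grows to 64 << 3 == 512 items.  Only the final chunk of a bitmap may
 * have an unaligned length.
 */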

/* Start should be aligned to the serialization granularity; the chunk size
 * should be aligned to it too, except for the last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}
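
/* A sketch of serializing a whole bitmap in aligned chunks (illustrative;
 * assumes "buf" points to a buffer large enough for the whole bitmap and
 * that the caller knows orig_size, e.g. the size passed to hbitmap_alloc):
 *
 *     uint64_t align = hbitmap_serialization_align(hb);
 *     uint64_t pos;
 *
 *     for (pos = 0; pos < orig_size; pos += align) {
 *         uint64_t len = MIN(align, orig_size - pos);
 *         hbitmap_serialize_part(hb, buf, pos, len);
 *         buf += hbitmap_serialization_size(hb, pos, len);
 *     }
 */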

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* Restore the levels starting from the penultimate down to level zero,
     * assuming that the last level is ok.
     */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    hb->orig_size = size;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
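
/* For example, hbitmap_alloc(1 << 16, 0) on a 64-bit host allocates a
 * last level of 65536 / 64 == 1024 words, 1024 / 64 == 16 words above it,
 * and a single word for every remaining level; level 0 always ends up one
 * word long, which is why the sentinel bit set above always fits.
 */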

void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    hb->orig_size = size;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* Bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants.  This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}

bool hbitmap_can_merge(const HBitmap *a, const HBitmap *b)
{
    return (a->orig_size == b->orig_size);
}

/**
 * hbitmap_sparse_merge: performs dst = dst | src.
 * Works with differing granularities; best used when src is sparsely
 * populated.
 */
static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
{
    uint64_t offset = 0;
    uint64_t count = src->orig_size;

    while (hbitmap_next_dirty_area(src, &offset, &count)) {
        hbitmap_set(dst, offset, count);
        offset += count;
        if (offset >= src->orig_size) {
            break;
        }
        count = src->orig_size - offset;
    }
}

/**
 * Given HBitmaps A and B, let R := A (BITOR) B.
 * Bitmaps A and B will not be modified,
 *     except when bitmap R is an alias of A or B.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
{
    int i;
    uint64_t j;

    if (!hbitmap_can_merge(a, b) || !hbitmap_can_merge(a, result)) {
        return false;
    }
    assert(hbitmap_can_merge(b, result));

    if ((!hbitmap_count(a) && result == b) ||
        (!hbitmap_count(b) && result == a)) {
        return true;
    }

    if (!hbitmap_count(a) && !hbitmap_count(b)) {
        hbitmap_reset_all(result);
        return true;
    }

    if (a->granularity != b->granularity) {
        if ((a != result) && (b != result)) {
            hbitmap_reset_all(result);
        }
        if (a != result) {
            hbitmap_sparse_merge(result, a);
        }
        if (b != result) {
            hbitmap_sparse_merge(result, b);
        }
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    assert(a->size == b->size);
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
        }
    }

    /* Recompute the dirty count */
    result->count = hb_count_between(result, 0, result->size - 1);

    return true;
}
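
/* A minimal usage sketch (illustrative): OR bitmap "b" into "a" in place,
 * assuming both cover the same number of items:
 *
 *     if (!hbitmap_merge(a, b, a)) {
 *         ... orig_size mismatch, nothing was merged ...
 *     }
 */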

HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
{
    assert(!(chunk_size & (chunk_size - 1)));
    assert(!hb->meta);
    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
                             hb->granularity + ctz32(chunk_size));
    return hb->meta;
}
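
/* For example, with hb->granularity == 0 and chunk_size == 4096, the meta
 * bitmap is created with granularity ctz32(4096) == 12, so a single meta
 * bit tracks the dirtiness of one 4096-item chunk of this bitmap.
 */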

void hbitmap_free_meta(HBitmap *hb)
{
    assert(hb->meta);
    hbitmap_free(hb->meta);
    hb->meta = NULL;
}

char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}