/*
 * qcow2 refcount handling (excerpt: lines matching "cluster" / "index")
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 */
26 #include "block/block-io.h"
43 static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index);
44 static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index);
45 static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index);
46 static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index);
47 static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index);
48 static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index);
49 static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index);
51 static void set_refcount_ro0(void *refcount_array, uint64_t index,
53 static void set_refcount_ro1(void *refcount_array, uint64_t index,
55 static void set_refcount_ro2(void *refcount_array, uint64_t index,
57 static void set_refcount_ro3(void *refcount_array, uint64_t index,
59 static void set_refcount_ro4(void *refcount_array, uint64_t index,
61 static void set_refcount_ro5(void *refcount_array, uint64_t index,
63 static void set_refcount_ro6(void *refcount_array, uint64_t index,
93 unsigned i = s->refcount_table_size - 1; in update_max_refcount_table_index()
94 while (i > 0 && (s->refcount_table[i] & REFT_OFFSET_MASK) == 0) { in update_max_refcount_table_index()
95 i--; in update_max_refcount_table_index()
97 /* Set s->max_refcount_table_index to the index of the last used entry */ in update_max_refcount_table_index()
98 s->max_refcount_table_index = i; in update_max_refcount_table_index()
103 BDRVQcow2State *s = bs->opaque; in qcow2_refcount_init()
107 assert(s->refcount_order >= 0 && s->refcount_order <= 6); in qcow2_refcount_init()
109 s->get_refcount = get_refcount_funcs[s->refcount_order]; in qcow2_refcount_init()
110 s->set_refcount = set_refcount_funcs[s->refcount_order]; in qcow2_refcount_init()
112 assert(s->refcount_table_size <= INT_MAX / REFTABLE_ENTRY_SIZE); in qcow2_refcount_init()
113 refcount_table_size2 = s->refcount_table_size * REFTABLE_ENTRY_SIZE; in qcow2_refcount_init()
114 s->refcount_table = g_try_malloc(refcount_table_size2); in qcow2_refcount_init()
116 if (s->refcount_table_size > 0) { in qcow2_refcount_init()
117 if (s->refcount_table == NULL) { in qcow2_refcount_init()
118 ret = -ENOMEM; in qcow2_refcount_init()
121 BLKDBG_CO_EVENT(bs->file, BLKDBG_REFTABLE_LOAD); in qcow2_refcount_init()
122 ret = bdrv_co_pread(bs->file, s->refcount_table_offset, in qcow2_refcount_init()
123 refcount_table_size2, s->refcount_table, 0); in qcow2_refcount_init()
127 for(i = 0; i < s->refcount_table_size; i++) in qcow2_refcount_init()
128 be64_to_cpus(&s->refcount_table[i]); in qcow2_refcount_init()
138 BDRVQcow2State *s = bs->opaque; in qcow2_refcount_close()
139 g_free(s->refcount_table); in qcow2_refcount_close()
/*
 * Read entry @index from a refcount array with 1-bit entries
 * (refcount_order 0). Eight entries are packed into each byte,
 * least-significant bit first.
 */
static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index)
{
    const uint8_t *bytes = refcount_array;

    return (bytes[index / 8] >> (index % 8)) & 0x1;
}
/*
 * Store @value into entry @index of a refcount array with 1-bit entries
 * (refcount_order 0). The entry's bit is cleared first and then OR-ed with
 * the new value, preserving the seven neighboring entries in the same byte.
 */
static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    /* A value wider than 1 bit would silently corrupt neighboring entries */
    assert(!(value >> 1));
    ((uint8_t *)refcount_array)[index / 8] &= ~(0x1 << (index % 8));
    ((uint8_t *)refcount_array)[index / 8] |= value << (index % 8);
}
/*
 * Read entry @index from a refcount array with 2-bit entries
 * (refcount_order 1). Four entries are packed into each byte,
 * least-significant pair first.
 */
static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index)
{
    const uint8_t *bytes = refcount_array;
    unsigned shift = 2 * (index % 4);

    return (bytes[index / 4] >> shift) & 0x3;
}
/*
 * Store @value into entry @index of a refcount array with 2-bit entries
 * (refcount_order 1). Clear-then-OR keeps the three neighboring entries
 * sharing the byte intact.
 */
static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    /* A value wider than 2 bits would silently corrupt neighboring entries */
    assert(!(value >> 2));
    ((uint8_t *)refcount_array)[index / 4] &= ~(0x3 << (2 * (index % 4)));
    ((uint8_t *)refcount_array)[index / 4] |= value << (2 * (index % 4));
}
/*
 * Read entry @index from a refcount array with 4-bit entries
 * (refcount_order 2). Two entries are packed into each byte,
 * low nibble first.
 */
static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index)
{
    const uint8_t *bytes = refcount_array;
    unsigned shift = 4 * (index % 2);

    return (bytes[index / 2] >> shift) & 0xf;
}
/*
 * Store @value into entry @index of a refcount array with 4-bit entries
 * (refcount_order 2). Clear-then-OR keeps the neighboring nibble in the
 * same byte intact.
 */
static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    /* A value wider than 4 bits would silently corrupt the other nibble */
    assert(!(value >> 4));
    ((uint8_t *)refcount_array)[index / 2] &= ~(0xf << (4 * (index % 2)));
    ((uint8_t *)refcount_array)[index / 2] |= value << (4 * (index % 2));
}
/*
 * Read entry @index from a refcount array with 8-bit entries
 * (refcount_order 3): one entry per byte, no unpacking needed.
 */
static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index)
{
    const uint8_t *bytes = refcount_array;

    return bytes[index];
}
/*
 * Store @value into entry @index of a refcount array with 8-bit entries
 * (refcount_order 3).
 */
static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    /* A value wider than 8 bits would be silently truncated */
    assert(!(value >> 8));
    ((uint8_t *)refcount_array)[index] = value;
}
/*
 * Read entry @index from a refcount array with 16-bit entries
 * (refcount_order 4). Entries are stored big-endian; assemble the host
 * value from the two bytes explicitly, which is endian- and
 * alignment-independent and yields the same result as be16_to_cpu() on an
 * aligned load.
 */
static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index)
{
    const uint8_t *entry = (const uint8_t *)refcount_array + 2 * index;

    return ((uint64_t)entry[0] << 8) | entry[1];
}
/*
 * Store @value into entry @index of a refcount array with 16-bit entries
 * (refcount_order 4). The entry is written in big-endian byte order, byte
 * by byte, matching cpu_to_be16() semantics without an aligned 16-bit
 * store.
 */
static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    uint8_t *entry = (uint8_t *)refcount_array + 2 * index;

    /* A value wider than 16 bits would be silently truncated */
    assert(!(value >> 16));
    entry[0] = value >> 8;
    entry[1] = value;
}
/*
 * Read entry @index from a refcount array with 32-bit entries
 * (refcount_order 5). Entries are stored big-endian; assemble the host
 * value byte by byte (equivalent to be32_to_cpu() on an aligned load).
 */
static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index)
{
    const uint8_t *entry = (const uint8_t *)refcount_array + 4 * index;

    return ((uint64_t)entry[0] << 24) | ((uint64_t)entry[1] << 16) |
           ((uint64_t)entry[2] << 8) | entry[3];
}
/*
 * Store @value into entry @index of a refcount array with 32-bit entries
 * (refcount_order 5). The entry is written in big-endian byte order,
 * matching cpu_to_be32() semantics.
 */
static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    uint8_t *entry = (uint8_t *)refcount_array + 4 * index;

    /* A value wider than 32 bits would be silently truncated */
    assert(!(value >> 32));
    entry[0] = value >> 24;
    entry[1] = value >> 16;
    entry[2] = value >> 8;
    entry[3] = value;
}
/*
 * Read entry @index from a refcount array with 64-bit entries
 * (refcount_order 6). Entries are stored big-endian; fold the eight bytes
 * into the host value (equivalent to be64_to_cpu() on an aligned load).
 */
static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index)
{
    const uint8_t *entry = (const uint8_t *)refcount_array + 8 * index;
    uint64_t value = 0;
    int i;

    for (i = 0; i < 8; i++) {
        value = (value << 8) | entry[i];
    }
    return value;
}
/*
 * Store @value into entry @index of a refcount array with 64-bit entries
 * (refcount_order 6). The entry is written big-endian, most significant
 * byte first, matching cpu_to_be64() semantics. The full 64-bit range is
 * representable, so no range check applies.
 */
static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    uint8_t *entry = (uint8_t *)refcount_array + 8 * index;
    int i;

    for (i = 0; i < 8; i++) {
        entry[i] = value >> (8 * (7 - i));
    }
}
236 BDRVQcow2State *s = bs->opaque; in load_refcount_block()
238 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD); in load_refcount_block()
239 return qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset, in load_refcount_block()
244 * Retrieves the refcount of the cluster given by its index and stores it in
245 * *refcount. Returns 0 on success and -errno on failure.
250 BDRVQcow2State *s = bs->opaque; in qcow2_get_refcount()
256 refcount_table_index = cluster_index >> s->refcount_block_bits; in qcow2_get_refcount()
257 if (refcount_table_index >= s->refcount_table_size) { in qcow2_get_refcount()
262 s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; in qcow2_get_refcount()
269 qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64 in qcow2_get_refcount()
270 " unaligned (reftable index: %#" PRIx64 ")", in qcow2_get_refcount()
272 return -EIO; in qcow2_get_refcount()
275 ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset, in qcow2_get_refcount()
281 block_index = cluster_index & (s->refcount_block_size - 1); in qcow2_get_refcount()
282 *refcount = s->get_refcount(refcount_block, block_index); in qcow2_get_refcount()
284 qcow2_cache_put(s->refcount_block_cache, &refcount_block); in qcow2_get_refcount()
293 uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits); in in_same_refcount_block()
294 uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits); in in_same_refcount_block()
303 * Returns 0 on success or -errno in error case
309 BDRVQcow2State *s = bs->opaque; in alloc_refcount_block()
313 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); in alloc_refcount_block()
315 /* Find the refcount block for the given cluster */ in alloc_refcount_block()
316 refcount_table_index = cluster_index >> s->refcount_block_bits; in alloc_refcount_block()
318 if (refcount_table_index < s->refcount_table_size) { in alloc_refcount_block()
321 s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; in alloc_refcount_block()
326 qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" in alloc_refcount_block()
327 PRIx64 " unaligned (reftable index: " in alloc_refcount_block()
330 return -EIO; in alloc_refcount_block()
340 * a cluster for the new refcount block. It may also include a new refcount in alloc_refcount_block()
345 * - We can't use the normal qcow2_alloc_clusters(), it would try to in alloc_refcount_block()
350 * - We need to consider that at this point we are inside update_refcounts in alloc_refcount_block()
354 * need to return -EAGAIN to signal the caller that it needs to restart in alloc_refcount_block()
357 * - alloc_clusters_noref and qcow2_free_clusters may load a different in alloc_refcount_block()
364 ret = qcow2_cache_flush(bs, s->l2_table_cache); in alloc_refcount_block()
370 int64_t new_block = alloc_clusters_noref(bs, s->cluster_size, INT64_MAX); in alloc_refcount_block()
380 qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid " in alloc_refcount_block()
382 return -EIO; in alloc_refcount_block()
388 refcount_table_index, cluster_index << s->cluster_bits, new_block); in alloc_refcount_block()
391 if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) { in alloc_refcount_block()
393 ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, in alloc_refcount_block()
399 memset(*refcount_block, 0, s->cluster_size); in alloc_refcount_block()
402 int block_index = (new_block >> s->cluster_bits) & in alloc_refcount_block()
403 (s->refcount_block_size - 1); in alloc_refcount_block()
404 s->set_refcount(*refcount_block, block_index, 1); in alloc_refcount_block()
408 ret = update_refcount(bs, new_block, s->cluster_size, 1, false, in alloc_refcount_block()
414 ret = qcow2_cache_flush(bs, s->refcount_block_cache); in alloc_refcount_block()
421 ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, in alloc_refcount_block()
427 memset(*refcount_block, 0, s->cluster_size); in alloc_refcount_block()
431 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE); in alloc_refcount_block()
432 qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block); in alloc_refcount_block()
433 ret = qcow2_cache_flush(bs, s->refcount_block_cache); in alloc_refcount_block()
439 if (refcount_table_index < s->refcount_table_size) { in alloc_refcount_block()
441 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP); in alloc_refcount_block()
442 ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + in alloc_refcount_block()
449 s->refcount_table[refcount_table_index] = new_block; in alloc_refcount_block()
450 /* If there's a hole in s->refcount_table then it can happen in alloc_refcount_block()
451 * that refcount_table_index < s->max_refcount_table_index */ in alloc_refcount_block()
452 s->max_refcount_table_index = in alloc_refcount_block()
453 MAX(s->max_refcount_table_index, refcount_table_index); in alloc_refcount_block()
457 return -EAGAIN; in alloc_refcount_block()
460 qcow2_cache_put(s->refcount_block_cache, refcount_block); in alloc_refcount_block()
473 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW); in alloc_refcount_block()
476 * basis for calculating the index of the first cluster used for the in alloc_refcount_block()
477 * self-describing refcount structures which we are about to create. in alloc_refcount_block()
481 * allocated to describe that cluster (and it will assume this role later in alloc_refcount_block()
482 * on), we cannot use that index; also, new_block may actually have a higher in alloc_refcount_block()
483 * cluster index than cluster_index, so it needs to be taken into account in alloc_refcount_block()
484 * here (and 1 needs to be added to its value because that cluster is used). in alloc_refcount_block()
487 (new_block >> s->cluster_bits) + 1), in alloc_refcount_block()
488 s->refcount_block_size); in alloc_refcount_block()
491 uint64_t meta_offset = (blocks_used * s->refcount_block_size) * in alloc_refcount_block()
492 s->cluster_size; in alloc_refcount_block()
505 /* If we were trying to do the initial refcount update for some cluster in alloc_refcount_block()
508 return -EAGAIN; in alloc_refcount_block()
512 qcow2_cache_put(s->refcount_block_cache, refcount_block); in alloc_refcount_block()
518 * Starting at @start_offset, this function creates new self-covering refcount
526 * block that should be entered into the new refcount table at index
530 * @additional_clusters may be placed) on success, -errno on error.
537 BDRVQcow2State *s = bs->opaque; in qcow2_refcount_area()
545 assert(!(start_offset % s->cluster_size)); in qcow2_refcount_area()
547 qcow2_refcount_metadata_size(start_offset / s->cluster_size + in qcow2_refcount_area()
549 s->cluster_size, s->refcount_order, in qcow2_refcount_area()
552 return -EFBIG; in qcow2_refcount_area()
556 /* Index in the refcount table of the first refcount block to cover the area in qcow2_refcount_area()
560 area_reftable_index = (start_offset / s->cluster_size) / in qcow2_refcount_area()
561 s->refcount_block_size; in qcow2_refcount_area()
570 table_size = ROUND_UP(table_size, s->cluster_size / REFTABLE_ENTRY_SIZE); in qcow2_refcount_area()
571 table_clusters = (table_size * REFTABLE_ENTRY_SIZE) / s->cluster_size; in qcow2_refcount_area()
574 return -EFBIG; in qcow2_refcount_area()
581 ret = -ENOMEM; in qcow2_refcount_area()
586 if (table_size > s->max_refcount_table_index) { in qcow2_refcount_area()
588 memcpy(new_table, s->refcount_table, in qcow2_refcount_area()
589 (s->max_refcount_table_index + 1) * REFTABLE_ENTRY_SIZE); in qcow2_refcount_area()
595 memcpy(new_table, s->refcount_table, table_size * REFTABLE_ENTRY_SIZE); in qcow2_refcount_area()
611 table_offset = start_offset + additional_refblock_count * s->cluster_size; in qcow2_refcount_area()
612 end_offset = table_offset + table_clusters * s->cluster_size; in qcow2_refcount_area()
622 ret = qcow2_cache_get(bs, s->refcount_block_cache, new_table[i], in qcow2_refcount_area()
628 ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, in qcow2_refcount_area()
633 memset(refblock_data, 0, s->cluster_size); in qcow2_refcount_area()
634 qcow2_cache_entry_mark_dirty(s->refcount_block_cache, in qcow2_refcount_area()
638 block_offset += s->cluster_size; in qcow2_refcount_area()
642 first_offset_covered = (uint64_t)i * s->refcount_block_size * in qcow2_refcount_area()
643 s->cluster_size; in qcow2_refcount_area()
651 j = (start_offset - first_offset_covered) / s->cluster_size; in qcow2_refcount_area()
652 assert(j < s->refcount_block_size); in qcow2_refcount_area()
657 end_index = MIN((end_offset - first_offset_covered) / in qcow2_refcount_area()
658 s->cluster_size, in qcow2_refcount_area()
659 s->refcount_block_size); in qcow2_refcount_area()
663 assert(s->get_refcount(refblock_data, j) == 0); in qcow2_refcount_area()
664 s->set_refcount(refblock_data, j, 1); in qcow2_refcount_area()
667 qcow2_cache_entry_mark_dirty(s->refcount_block_cache, in qcow2_refcount_area()
671 qcow2_cache_put(s->refcount_block_cache, &refblock_data); in qcow2_refcount_area()
677 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS); in qcow2_refcount_area()
678 ret = qcow2_cache_flush(bs, s->refcount_block_cache); in qcow2_refcount_area()
688 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE); in qcow2_refcount_area()
689 ret = bdrv_pwrite_sync(bs->file, table_offset, in qcow2_refcount_area()
706 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE); in qcow2_refcount_area()
707 ret = bdrv_pwrite_sync(bs->file, in qcow2_refcount_area()
715 uint64_t old_table_offset = s->refcount_table_offset; in qcow2_refcount_area()
716 uint64_t old_table_size = s->refcount_table_size; in qcow2_refcount_area()
718 g_free(s->refcount_table); in qcow2_refcount_area()
719 s->refcount_table = new_table; in qcow2_refcount_area()
720 s->refcount_table_size = table_size; in qcow2_refcount_area()
721 s->refcount_table_offset = table_offset; in qcow2_refcount_area()
738 BDRVQcow2State *s = bs->opaque; in qcow2_process_discards()
741 QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) { in qcow2_process_discards()
742 QTAILQ_REMOVE(&s->discards, d, next); in qcow2_process_discards()
746 int r2 = bdrv_pdiscard(bs->file, d->offset, d->bytes); in qcow2_process_discards()
748 trace_qcow2_process_discards_failed_region(d->offset, d->bytes, in qcow2_process_discards()
760 BDRVQcow2State *s = bs->opaque; in update_refcount_discard()
763 QTAILQ_FOREACH(d, &s->discards, next) { in update_refcount_discard()
764 uint64_t new_start = MIN(offset, d->offset); in update_refcount_discard()
765 uint64_t new_end = MAX(offset + length, d->offset + d->bytes); in update_refcount_discard()
767 if (new_end - new_start <= length + d->bytes) { in update_refcount_discard()
771 assert(d->bytes + length == new_end - new_start); in update_refcount_discard()
772 d->offset = new_start; in update_refcount_discard()
773 d->bytes = new_end - new_start; in update_refcount_discard()
784 QTAILQ_INSERT_TAIL(&s->discards, d, next); in update_refcount_discard()
788 QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) { in update_refcount_discard()
790 || p->offset > d->offset + d->bytes in update_refcount_discard()
791 || d->offset > p->offset + p->bytes) in update_refcount_discard()
797 assert(p->offset == d->offset + d->bytes in update_refcount_discard()
798 || d->offset == p->offset + p->bytes); in update_refcount_discard()
800 QTAILQ_REMOVE(&s->discards, p, next); in update_refcount_discard()
801 d->offset = MIN(d->offset, p->offset); in update_refcount_discard()
802 d->bytes += p->bytes; in update_refcount_discard()
814 BDRVQcow2State *s = bs->opaque; in update_refcount()
817 int64_t old_table_index = -1; in update_refcount()
822 " addend=%s%" PRIu64 "\n", offset, length, decrease ? "-" : "", in update_refcount()
826 return -EINVAL; in update_refcount()
832 qcow2_cache_set_dependency(bs, s->refcount_block_cache, in update_refcount()
833 s->l2_table_cache); in update_refcount()
837 last = start_of_cluster(s, offset + length - 1); in update_refcount()
839 cluster_offset += s->cluster_size) in update_refcount()
843 int64_t cluster_index = cluster_offset >> s->cluster_bits; in update_refcount()
844 int64_t table_index = cluster_index >> s->refcount_block_bits; in update_refcount()
849 qcow2_cache_put(s->refcount_block_cache, &refcount_block); in update_refcount()
854 if (ret == -EAGAIN) { in update_refcount()
855 if (s->free_cluster_index > (start >> s->cluster_bits)) { in update_refcount()
856 s->free_cluster_index = (start >> s->cluster_bits); in update_refcount()
865 qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block); in update_refcount()
868 block_index = cluster_index & (s->refcount_block_size - 1); in update_refcount()
870 refcount = s->get_refcount(refcount_block, block_index); in update_refcount()
871 if (decrease ? (refcount - addend > refcount) in update_refcount()
873 refcount + addend > s->refcount_max)) in update_refcount()
875 ret = -EINVAL; in update_refcount()
879 refcount -= addend; in update_refcount()
883 if (refcount == 0 && cluster_index < s->free_cluster_index) { in update_refcount()
884 s->free_cluster_index = cluster_index; in update_refcount()
886 s->set_refcount(refcount_block, block_index, refcount); in update_refcount()
891 table = qcow2_cache_is_table_offset(s->refcount_block_cache, in update_refcount()
894 qcow2_cache_put(s->refcount_block_cache, &refcount_block); in update_refcount()
895 old_table_index = -1; in update_refcount()
896 qcow2_cache_discard(s->refcount_block_cache, table); in update_refcount()
899 table = qcow2_cache_is_table_offset(s->l2_table_cache, offset); in update_refcount()
901 qcow2_cache_discard(s->l2_table_cache, table); in update_refcount()
904 if (s->discard_passthrough[type]) { in update_refcount()
905 update_refcount_discard(bs, cluster_offset, s->cluster_size); in update_refcount()
912 if (!s->cache_discards) { in update_refcount()
918 qcow2_cache_put(s->refcount_block_cache, &refcount_block); in update_refcount()
927 dummy = update_refcount(bs, offset, cluster_offset - offset, addend, in update_refcount()
936 * Increases or decreases the refcount of a given cluster.
941 * On success 0 is returned; on failure -errno is returned.
948 BDRVQcow2State *s = bs->opaque; in qcow2_update_cluster_refcount()
951 ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend, in qcow2_update_cluster_refcount()
963 /* cluster allocation functions */
971 BDRVQcow2State *s = bs->opaque; in alloc_clusters_noref()
976 if (s->cache_discards) { in alloc_clusters_noref()
983 uint64_t next_cluster_index = s->free_cluster_index++; in alloc_clusters_noref()
995 if (s->free_cluster_index > 0 && in alloc_clusters_noref()
996 s->free_cluster_index - 1 > (max >> s->cluster_bits)) in alloc_clusters_noref()
998 return -EFBIG; in alloc_clusters_noref()
1002 fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n", in alloc_clusters_noref()
1004 (s->free_cluster_index - nb_clusters) << s->cluster_bits); in alloc_clusters_noref()
1006 return (s->free_cluster_index - nb_clusters) << s->cluster_bits; in alloc_clusters_noref()
1014 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC); in qcow2_alloc_clusters()
1022 } while (ret == -EAGAIN); in qcow2_alloc_clusters()
1034 BDRVQcow2State *s = bs->opaque; in qcow2_alloc_clusters_at()
1046 cluster_index = offset >> s->cluster_bits; in qcow2_alloc_clusters_at()
1057 ret = update_refcount(bs, offset, i << s->cluster_bits, 1, false, in qcow2_alloc_clusters_at()
1059 } while (ret == -EAGAIN); in qcow2_alloc_clusters_at()
1072 BDRVQcow2State *s = bs->opaque; in qcow2_alloc_bytes()
1077 BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES); in qcow2_alloc_bytes()
1078 assert(size > 0 && size <= s->cluster_size); in qcow2_alloc_bytes()
1079 assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset)); in qcow2_alloc_bytes()
1081 offset = s->free_byte_offset; in qcow2_alloc_bytes()
1085 ret = qcow2_get_refcount(bs, offset >> s->cluster_bits, &refcount); in qcow2_alloc_bytes()
1090 if (refcount == s->refcount_max) { in qcow2_alloc_bytes()
1095 free_in_cluster = s->cluster_size - offset_into_cluster(s, offset); in qcow2_alloc_bytes()
1100 new_cluster = alloc_clusters_noref(bs, s->cluster_size, in qcow2_alloc_bytes()
1101 MIN(s->cluster_offset_mask, in qcow2_alloc_bytes()
1108 qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid " in qcow2_alloc_bytes()
1109 "allocation of compressed cluster " in qcow2_alloc_bytes()
1111 return -EIO; in qcow2_alloc_bytes()
1114 if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) { in qcow2_alloc_bytes()
1116 free_in_cluster = s->cluster_size; in qcow2_alloc_bytes()
1118 free_in_cluster += s->cluster_size; in qcow2_alloc_bytes()
1127 } while (ret == -EAGAIN); in qcow2_alloc_bytes()
1132 /* The cluster refcount was incremented; refcount blocks must be flushed in qcow2_alloc_bytes()
1134 qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache); in qcow2_alloc_bytes()
1136 s->free_byte_offset = offset + size; in qcow2_alloc_bytes()
1137 if (!offset_into_cluster(s, s->free_byte_offset)) { in qcow2_alloc_bytes()
1138 s->free_byte_offset = 0; in qcow2_alloc_bytes()
1150 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE); in qcow2_free_clusters()
1153 fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret)); in qcow2_free_clusters()
1159 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
1160 * normal cluster, compressed cluster, etc.)
1165 BDRVQcow2State *s = bs->opaque; in qcow2_free_any_cluster()
1169 if (s->discard_passthrough[type] && in qcow2_free_any_cluster()
1173 bdrv_pdiscard(s->data_file, l2_entry & L2E_OFFSET_MASK, in qcow2_free_any_cluster()
1174 s->cluster_size); in qcow2_free_any_cluster()
1192 qcow2_signal_corruption(bs, false, -1, -1, in qcow2_free_any_cluster()
1193 "Cannot free unaligned cluster %#llx", in qcow2_free_any_cluster()
1197 s->cluster_size, type); in qcow2_free_any_cluster()
1210 BDRVQcow2State *s = bs->opaque; in qcow2_write_caches()
1213 ret = qcow2_cache_write(bs, s->l2_table_cache); in qcow2_write_caches()
1219 ret = qcow2_cache_write(bs, s->refcount_block_cache); in qcow2_write_caches()
1235 return bdrv_flush(bs->file->bs); in qcow2_flush_caches()
1247 BDRVQcow2State *s = bs->opaque; in qcow2_update_snapshot_refcount()
1255 assert(addend >= -1 && addend <= 1); in qcow2_update_snapshot_refcount()
1260 slice_size2 = s->l2_slice_size * l2_entry_size(s); in qcow2_update_snapshot_refcount()
1261 n_slices = s->cluster_size / slice_size2; in qcow2_update_snapshot_refcount()
1263 s->cache_discards = true; in qcow2_update_snapshot_refcount()
1266 * l1_table_offset when it is the current s->l1_table_offset! Be careful in qcow2_update_snapshot_refcount()
1268 if (l1_table_offset != s->l1_table_offset) { in qcow2_update_snapshot_refcount()
1271 ret = -ENOMEM; in qcow2_update_snapshot_refcount()
1276 ret = bdrv_pread(bs->file, l1_table_offset, l1_size2, l1_table, 0); in qcow2_update_snapshot_refcount()
1285 assert(l1_size == s->l1_size); in qcow2_update_snapshot_refcount()
1286 l1_table = s->l1_table; in qcow2_update_snapshot_refcount()
1297 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" in qcow2_update_snapshot_refcount()
1298 PRIx64 " unaligned (L1 index: %#x)", in qcow2_update_snapshot_refcount()
1300 ret = -EIO; in qcow2_update_snapshot_refcount()
1305 ret = qcow2_cache_get(bs, s->l2_table_cache, in qcow2_update_snapshot_refcount()
1312 for (j = 0; j < s->l2_slice_size; j++) { in qcow2_update_snapshot_refcount()
1344 /* Here l2_index means table (not slice) index */ in qcow2_update_snapshot_refcount()
1345 int l2_index = slice * s->l2_slice_size + j; in qcow2_update_snapshot_refcount()
1347 bs, true, -1, -1, "Cluster " in qcow2_update_snapshot_refcount()
1350 PRIx64 ", L2 index: %#x)", in qcow2_update_snapshot_refcount()
1352 ret = -EIO; in qcow2_update_snapshot_refcount()
1356 cluster_index = offset >> s->cluster_bits; in qcow2_update_snapshot_refcount()
1387 qcow2_cache_set_dependency(bs, s->l2_table_cache, in qcow2_update_snapshot_refcount()
1388 s->refcount_block_cache); in qcow2_update_snapshot_refcount()
1391 qcow2_cache_entry_mark_dirty(s->l2_table_cache, in qcow2_update_snapshot_refcount()
1396 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); in qcow2_update_snapshot_refcount()
1401 s->cluster_bits, in qcow2_update_snapshot_refcount()
1408 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, in qcow2_update_snapshot_refcount()
1425 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); in qcow2_update_snapshot_refcount()
1428 s->cache_discards = false; in qcow2_update_snapshot_refcount()
1431 /* Update L1 only if it isn't deleted anyway (addend = -1) */ in qcow2_update_snapshot_refcount()
1437 ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_size2, l1_table, in qcow2_update_snapshot_refcount()
1459 * 2^(64 - 9) clusters at once (with cluster size 512 = 2^9, and because in refcount_array_byte_size()
1460 * offsets have to be representable in bytes); due to every cluster in refcount_array_byte_size()
1462 assert(entries < (UINT64_C(1) << (64 - 9))); in refcount_array_byte_size()
1465 * s->refcount_order < 7. in refcount_array_byte_size()
1466 * (note: x << s->refcount_order == x * s->refcount_bits) */ in refcount_array_byte_size()
1467 return DIV_ROUND_UP(entries << s->refcount_order, 8); in refcount_array_byte_size()
1473 * and *size will not be modified and -errno will be returned. If the
1476 * refcount array buffer will be aligned to a cluster boundary, and the newly
1487 * s->cluster_size; in realloc_refcount_array()
1489 * s->cluster_size; in realloc_refcount_array()
1499 return -ENOMEM; in realloc_refcount_array()
1504 return -ENOMEM; in realloc_refcount_array()
1509 new_byte_size - old_byte_size); in realloc_refcount_array()
1531 BDRVQcow2State *s = bs->opaque; in qcow2_inc_refcounts_imrt()
1540 file_len = bdrv_co_getlength(bs->file->bs); in qcow2_inc_refcounts_imrt()
1546 * Last cluster of qcow2 image may be semi-allocated, so it may be OK to in qcow2_inc_refcounts_imrt()
1548 * cluster. in qcow2_inc_refcounts_imrt()
1550 if (offset + size - file_len >= s->cluster_size) { in qcow2_inc_refcounts_imrt()
1552 "end of the file by one cluster or more: offset 0x%" PRIx64 in qcow2_inc_refcounts_imrt()
1554 res->corruptions++; in qcow2_inc_refcounts_imrt()
1559 last = start_of_cluster(s, offset + size - 1); in qcow2_inc_refcounts_imrt()
1561 cluster_offset += s->cluster_size) { in qcow2_inc_refcounts_imrt()
1562 k = cluster_offset >> s->cluster_bits; in qcow2_inc_refcounts_imrt()
1567 res->check_errors++; in qcow2_inc_refcounts_imrt()
1572 refcount = s->get_refcount(*refcount_table, k); in qcow2_inc_refcounts_imrt()
1573 if (refcount == s->refcount_max) { in qcow2_inc_refcounts_imrt()
1574 fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64 in qcow2_inc_refcounts_imrt()
1576 fprintf(stderr, "Use qemu-img amend to increase the refcount entry " in qcow2_inc_refcounts_imrt()
1577 "width or qemu-img convert to create a clean copy if the " in qcow2_inc_refcounts_imrt()
1579 res->corruptions++; in qcow2_inc_refcounts_imrt()
1582 s->set_refcount(*refcount_table, k, refcount + 1); in qcow2_inc_refcounts_imrt()
1597 * This function decrements res->corruptions on success, so the caller is
1598 * responsible to increment res->corruptions prior to the call.
1600 * On failure in-memory @l2_table may be modified.
1608 BDRVQcow2State *s = bs->opaque; in fix_l2_entry_by_zero()
1637 ret = bdrv_co_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s), in fix_l2_entry_by_zero()
1641 "table entry: %s\n", strerror(-ret)); in fix_l2_entry_by_zero()
1645 res->corruptions--; in fix_l2_entry_by_zero()
1646 res->corruptions_fixed++; in fix_l2_entry_by_zero()
1650 res->check_errors++; in fix_l2_entry_by_zero()
1659 * Returns the number of errors found by the checks or -errno if an internal
1668 BDRVQcow2State *s = bs->opaque; in check_refcounts_l2()
1672 size_t l2_size_bytes = s->l2_size * l2_entry_size(s); in check_refcounts_l2()
1677 ret = bdrv_co_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0); in check_refcounts_l2()
1680 res->check_errors++; in check_refcounts_l2()
1685 for (i = 0; i < s->l2_size; i++) { in check_refcounts_l2()
1695 /* Check reserved bits of Standard Cluster Descriptor */ in check_refcounts_l2()
1699 res->corruptions++; in check_refcounts_l2()
1709 "clusters\n", l2_entry & s->cluster_offset_mask); in check_refcounts_l2()
1711 res->corruptions++; in check_refcounts_l2()
1715 fprintf(stderr, "ERROR compressed cluster %d with data file, " in check_refcounts_l2()
1717 res->corruptions++; in check_refcounts_l2()
1722 fprintf(stderr, "ERROR compressed cluster %d with non-zero " in check_refcounts_l2()
1725 res->corruptions++; in check_refcounts_l2()
1729 /* Mark cluster as used */ in check_refcounts_l2()
1738 res->bfi.allocated_clusters++; in check_refcounts_l2()
1739 res->bfi.compressed_clusters++; in check_refcounts_l2()
1743 * take up sub-sector space but we only have sector granularity in check_refcounts_l2()
1744 * I/O we need to re-read the same sectors even for adjacent in check_refcounts_l2()
1747 res->bfi.fragmented_clusters++; in check_refcounts_l2()
1757 res->corruptions++; in check_refcounts_l2()
1759 "cluster has corrupted subcluster allocation bitmap\n", in check_refcounts_l2()
1763 /* Correct offsets are cluster aligned */ in check_refcounts_l2()
1766 res->corruptions++; in check_refcounts_l2()
1776 "cluster is not properly aligned; L2 entry " in check_refcounts_l2()
1794 * Skip marking the cluster as used in check_refcounts_l2()
1807 fprintf(stderr, "ERROR offset=%" PRIx64 ": Data cluster is " in check_refcounts_l2()
1813 res->bfi.allocated_clusters++; in check_refcounts_l2()
1816 res->bfi.fragmented_clusters++; in check_refcounts_l2()
1818 next_contiguous_offset = offset + s->cluster_size; in check_refcounts_l2()
1821 /* Mark cluster as used */ in check_refcounts_l2()
1825 offset, s->cluster_size); in check_refcounts_l2()
1840 res->corruptions++; in check_refcounts_l2()
1842 "cluster has non-zero subcluster allocation map\n"); in check_refcounts_l2()
1859 * Returns the number of errors found by the checks or -errno if an internal
1868 BDRVQcow2State *s = bs->opaque; in check_refcounts_l1()
1887 res->check_errors++; in check_refcounts_l1()
1888 return -ENOMEM; in check_refcounts_l1()
1892 ret = bdrv_co_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0); in check_refcounts_l1()
1895 res->check_errors++; in check_refcounts_l1()
1912 res->corruptions++; in check_refcounts_l1()
1920 l2_offset, s->cluster_size); in check_refcounts_l1()
1925 /* L2 tables are cluster aligned */ in check_refcounts_l1()
1928 "cluster aligned; L1 entry corrupted\n", l2_offset); in check_refcounts_l1()
1929 res->corruptions++; in check_refcounts_l1()
1955 BDRVQcow2State *s = bs->opaque; in check_oflag_copied()
1956 uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size); in check_oflag_copied()
1969 repair = !res->check_errors && !res->corruptions && !res->leaks; in check_oflag_copied()
1974 for (i = 0; i < s->l1_size; i++) { in check_oflag_copied()
1975 uint64_t l1_entry = s->l1_table[i]; in check_oflag_copied()
1983 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, in check_oflag_copied()
1990 res->corruptions++; in check_oflag_copied()
1991 fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d " in check_oflag_copied()
1995 s->l1_table[i] = refcount == 1 in check_oflag_copied()
2000 res->check_errors++; in check_oflag_copied()
2003 res->corruptions--; in check_oflag_copied()
2004 res->corruptions_fixed++; in check_oflag_copied()
2008 ret = bdrv_co_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s), in check_oflag_copied()
2012 strerror(-ret)); in check_oflag_copied()
2013 res->check_errors++; in check_oflag_copied()
2017 for (j = 0; j < s->l2_size; j++) { in check_oflag_copied()
2028 data_offset >> s->cluster_bits, in check_oflag_copied()
2036 res->corruptions++; in check_oflag_copied()
2037 fprintf(stderr, "%s OFLAG_COPIED data cluster: " in check_oflag_copied()
2053 l2_offset, s->cluster_size, in check_oflag_copied()
2057 "overlap check failed: %s\n", strerror(-ret)); in check_oflag_copied()
2058 res->check_errors++; in check_oflag_copied()
2062 ret = bdrv_co_pwrite(bs->file, l2_offset, s->cluster_size, l2_table, 0); in check_oflag_copied()
2065 strerror(-ret)); in check_oflag_copied()
2066 res->check_errors++; in check_oflag_copied()
2069 res->corruptions -= l2_dirty; in check_oflag_copied()
2070 res->corruptions_fixed += l2_dirty; in check_oflag_copied()
2090 BDRVQcow2State *s = bs->opaque; in check_refblocks()
2094 for(i = 0; i < s->refcount_table_size; i++) { in check_refblocks()
2095 uint64_t offset, cluster; in check_refblocks() local
2096 offset = s->refcount_table[i] & REFT_OFFSET_MASK; in check_refblocks()
2097 cluster = offset >> s->cluster_bits; in check_refblocks()
2099 if (s->refcount_table[i] & REFT_RESERVED_MASK) { in check_refblocks()
2102 res->corruptions++; in check_refblocks()
2107 /* Refcount blocks are cluster aligned */ in check_refblocks()
2110 "cluster aligned; refcount table entry corrupted\n", i); in check_refblocks()
2111 res->corruptions++; in check_refblocks()
2116 if (cluster >= *nb_clusters) { in check_refblocks()
2117 res->corruptions++; in check_refblocks()
2125 if (offset > INT64_MAX - s->cluster_size) { in check_refblocks()
2126 ret = -EINVAL; in check_refblocks()
2130 ret = bdrv_co_truncate(bs->file, offset + s->cluster_size, false, in check_refblocks()
2136 size = bdrv_co_getlength(bs->file->bs); in check_refblocks()
2148 res->check_errors++; in check_refblocks()
2152 if (cluster >= *nb_clusters) { in check_refblocks()
2153 ret = -EINVAL; in check_refblocks()
2157 res->corruptions--; in check_refblocks()
2158 res->corruptions_fixed++; in check_refblocks()
2161 offset, s->cluster_size); in check_refblocks()
2173 strerror(-ret)); in check_refblocks()
2180 offset, s->cluster_size); in check_refblocks()
2184 if (s->get_refcount(*refcount_table, cluster) != 1) { in check_refblocks()
2187 s->get_refcount(*refcount_table, cluster)); in check_refblocks()
2188 res->corruptions++; in check_refblocks()
2198 * Calculates an in-memory refcount table.
2205 BDRVQcow2State *s = bs->opaque; in calculate_refcounts()
2215 res->check_errors++; in calculate_refcounts()
2222 0, s->cluster_size); in calculate_refcounts()
2229 s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO, in calculate_refcounts()
2236 if (has_data_file(bs) && s->nb_snapshots) { in calculate_refcounts()
2238 s->nb_snapshots); in calculate_refcounts()
2239 res->corruptions++; in calculate_refcounts()
2242 for (i = 0; i < s->nb_snapshots; i++) { in calculate_refcounts()
2243 sn = s->snapshots + i; in calculate_refcounts()
2244 if (offset_into_cluster(s, sn->l1_table_offset)) { in calculate_refcounts()
2246 "L1 table is not cluster aligned; snapshot table entry " in calculate_refcounts()
2247 "corrupted\n", sn->id_str, sn->name, sn->l1_table_offset); in calculate_refcounts()
2248 res->corruptions++; in calculate_refcounts()
2251 if (sn->l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) { in calculate_refcounts()
2254 sn->id_str, sn->name, sn->l1_size); in calculate_refcounts()
2255 res->corruptions++; in calculate_refcounts()
2259 sn->l1_table_offset, sn->l1_size, 0, fix, in calculate_refcounts()
2266 s->snapshots_offset, s->snapshots_size); in calculate_refcounts()
2273 s->refcount_table_offset, in calculate_refcounts()
2274 s->refcount_table_size * in calculate_refcounts()
2281 if (s->crypto_header.length) { in calculate_refcounts()
2283 s->crypto_header.offset, in calculate_refcounts()
2284 s->crypto_header.length); in calculate_refcounts()
2300 * Compares the actual reference count for each cluster in the image against the
2301 * refcount as reported by the refcount structures on-disk.
2309 BDRVQcow2State *s = bs->opaque; in compare_refcounts()
2317 fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n", in compare_refcounts()
2318 i, strerror(-ret)); in compare_refcounts()
2319 res->check_errors++; in compare_refcounts()
2323 refcount2 = s->get_refcount(refcount_table, i); in compare_refcounts()
2335 num_fixed = &res->leaks_fixed; in compare_refcounts()
2337 num_fixed = &res->corruptions_fixed; in compare_refcounts()
2340 fprintf(stderr, "%s cluster %" PRId64 " refcount=%" PRIu64 in compare_refcounts()
2348 ret = update_refcount(bs, i << s->cluster_bits, 1, in compare_refcounts()
2360 res->corruptions++; in compare_refcounts()
2362 res->leaks++; in compare_refcounts()
2369 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
2370 * the on-disk refcount structures.
2373 * actually be a free cluster; the returned offset will not be before that
2374 * cluster. On output, *first_free_cluster points to the first gap found, even
2377 * Note that *first_free_cluster is a cluster index whereas the return value is
2386 BDRVQcow2State *s = bs->opaque; in alloc_clusters_imrt()
2387 int64_t cluster = *first_free_cluster, i; in alloc_clusters_imrt() local
2395 cluster < *imrt_nb_clusters && in alloc_clusters_imrt()
2397 cluster++) in alloc_clusters_imrt()
2399 if (!s->get_refcount(*refcount_table, cluster)) { in alloc_clusters_imrt()
2402 /* If this is the first free cluster found, update in alloc_clusters_imrt()
2404 *first_free_cluster = cluster; in alloc_clusters_imrt()
2413 * of continuously free clusters until the current cluster; the first free in alloc_clusters_imrt()
2414 * cluster in the current "gap" is therefore in alloc_clusters_imrt()
2415 * cluster - contiguous_free_clusters */ in alloc_clusters_imrt()
2417 /* If no such range could be found, grow the in-memory refcount table in alloc_clusters_imrt()
2422 * cluster_count - contiguous_free_clusters new clusters at the end of in alloc_clusters_imrt()
2423 * the image (which is the current value of cluster; note that cluster in alloc_clusters_imrt()
2427 cluster + cluster_count in alloc_clusters_imrt()
2428 - contiguous_free_clusters); in alloc_clusters_imrt()
2434 /* Go back to the first free cluster */ in alloc_clusters_imrt()
2435 cluster -= contiguous_free_clusters; in alloc_clusters_imrt()
2437 s->set_refcount(*refcount_table, cluster + i, 1); in alloc_clusters_imrt()
2440 return cluster << s->cluster_bits; in alloc_clusters_imrt()
2448 * and allocation data is taken from the in-memory refcount table
2452 * For these refblocks, clusters are allocated using said in-memory
2465 * Return whether the on-disk reftable array was resized (true/false),
2466 * or -errno on error.
2476 BDRVQcow2State *s = bs->opaque; in rebuild_refcounts_write_refblocks()
2477 int64_t cluster; in rebuild_refcounts_write_refblocks() local
2486 for (cluster = first_cluster; cluster < end_cluster; cluster++) { in rebuild_refcounts_write_refblocks()
2487 /* Check all clusters to find refblocks that contain non-zero entries */ in rebuild_refcounts_write_refblocks()
2488 if (!s->get_refcount(*refcount_table, cluster)) { in rebuild_refcounts_write_refblocks()
2493 * This cluster is allocated, so we need to create a refblock in rebuild_refcounts_write_refblocks()
2501 refblock_index = cluster >> s->refcount_block_bits; in rebuild_refcounts_write_refblocks()
2502 refblock_start = refblock_index << s->refcount_block_bits; in rebuild_refcounts_write_refblocks()
2516 /* Don't allocate a cluster in a refblock already written to disk */ in rebuild_refcounts_write_refblocks()
2524 error_setg_errno(errp, -refblock_offset, in rebuild_refcounts_write_refblocks()
2529 refblock_cluster_index = refblock_offset / s->cluster_size; in rebuild_refcounts_write_refblocks()
2541 s->cluster_size) / REFTABLE_ENTRY_SIZE; in rebuild_refcounts_write_refblocks()
2548 return -ENOMEM; in rebuild_refcounts_write_refblocks()
2552 (on_disk_reftable_entries - in rebuild_refcounts_write_refblocks()
2569 s->cluster_size, false); in rebuild_refcounts_write_refblocks()
2571 error_setg_errno(errp, -ret, "ERROR writing refblock"); in rebuild_refcounts_write_refblocks()
2579 * out-of-bounds accesses. in rebuild_refcounts_write_refblocks()
2582 refblock_index * s->cluster_size); in rebuild_refcounts_write_refblocks()
2584 ret = bdrv_co_pwrite(bs->file, refblock_offset, s->cluster_size, in rebuild_refcounts_write_refblocks()
2587 error_setg_errno(errp, -ret, "ERROR writing refblock"); in rebuild_refcounts_write_refblocks()
2592 cluster = refblock_start + s->refcount_block_size - 1; in rebuild_refcounts_write_refblocks()
2599 * Creates a new refcount structure based solely on the in-memory information
2600 * given through *refcount_table (this in-memory information is basically just
2612 BDRVQcow2State *s = bs->opaque; in rebuild_refcount_structure()
2613 int64_t reftable_offset = -1; in rebuild_refcount_structure()
2626 qcow2_cache_empty(bs, s->refcount_block_cache); in rebuild_refcount_structure()
2630 * cluster (in the in-memory refcount table) and write its offset in rebuild_refcount_structure()
2632 * disk (as a slice of the in-memory refcount table). in rebuild_refcount_structure()
2636 * reftable. This will dirty the in-memory refcount table (i.e. in rebuild_refcount_structure()
2645 * (This loop will terminate, because with every cluster the in rebuild_refcount_structure()
2650 * We then convert the reftable to big-endian and write it to disk. in rebuild_refcount_structure()
2663 res->check_errors++; in rebuild_refcount_structure()
2685 error_setg_errno(errp, -reftable_offset, in rebuild_refcount_structure()
2687 res->check_errors++; in rebuild_refcount_structure()
2693 * We need to update the affected refblocks, so re-run the in rebuild_refcount_structure()
2697 reftable_start_cluster = reftable_offset / s->cluster_size; in rebuild_refcount_structure()
2706 res->check_errors++; in rebuild_refcount_structure()
2722 * reftable to big-endian and write it to disk. in rebuild_refcount_structure()
2734 error_setg_errno(errp, -ret, "ERROR writing reftable"); in rebuild_refcount_structure()
2739 ret = bdrv_co_pwrite(bs->file, reftable_offset, reftable_length, in rebuild_refcount_structure()
2742 error_setg_errno(errp, -ret, "ERROR writing reftable"); in rebuild_refcount_structure()
2750 ret = bdrv_co_pwrite_sync(bs->file, in rebuild_refcount_structure()
2755 error_setg_errno(errp, -ret, "ERROR setting reftable"); in rebuild_refcount_structure()
2764 s->refcount_table = on_disk_reftable; in rebuild_refcount_structure()
2765 s->refcount_table_offset = reftable_offset; in rebuild_refcount_structure()
2766 s->refcount_table_size = on_disk_reftable_entries; in rebuild_refcount_structure()
2780 * detected as corrupted, and -errno when an internal error occurred.
2785 BDRVQcow2State *s = bs->opaque; in qcow2_check_refcounts()
2792 size = bdrv_co_getlength(bs->file->bs); in qcow2_check_refcounts()
2794 res->check_errors++; in qcow2_check_refcounts()
2800 res->check_errors++; in qcow2_check_refcounts()
2801 return -EFBIG; in qcow2_check_refcounts()
2804 res->bfi.total_clusters = in qcow2_check_refcounts()
2805 size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE); in qcow2_check_refcounts()
2833 res->corruptions = 0; in qcow2_check_refcounts()
2834 res->leaks = 0; in qcow2_check_refcounts()
2863 fresh_leaks = res->leaks; in qcow2_check_refcounts()
2867 if (res->corruptions < old_res.corruptions) { in qcow2_check_refcounts()
2868 res->corruptions_fixed += old_res.corruptions - res->corruptions; in qcow2_check_refcounts()
2870 if (res->leaks < old_res.leaks) { in qcow2_check_refcounts()
2871 res->leaks_fixed += old_res.leaks - res->leaks; in qcow2_check_refcounts()
2873 res->leaks += fresh_leaks; in qcow2_check_refcounts()
2877 res->check_errors++; in qcow2_check_refcounts()
2878 ret = -EIO; in qcow2_check_refcounts()
2882 if (res->leaks || res->corruptions) { in qcow2_check_refcounts()
2895 res->image_end_offset = (highest_cluster + 1) * s->cluster_size; in qcow2_check_refcounts()
2916 * - 0 if writing to this offset will not affect the mentioned metadata
2917 * - a positive QCow2MetadataOverlap value indicating one overlapping section
2918 * - a negative value (-errno) indicating an error while performing a check,
2924 BDRVQcow2State *s = bs->opaque; in qcow2_check_metadata_overlap()
2925 int chk = s->overlap_check & ~ign; in qcow2_check_metadata_overlap()
2933 if (offset < s->cluster_size) { in qcow2_check_metadata_overlap()
2938 /* align range to test to cluster boundaries */ in qcow2_check_metadata_overlap()
2939 size = ROUND_UP(offset_into_cluster(s, offset) + size, s->cluster_size); in qcow2_check_metadata_overlap()
2942 if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) { in qcow2_check_metadata_overlap()
2943 if (overlaps_with(s->l1_table_offset, s->l1_size * L1E_SIZE)) { in qcow2_check_metadata_overlap()
2948 if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) { in qcow2_check_metadata_overlap()
2949 if (overlaps_with(s->refcount_table_offset, in qcow2_check_metadata_overlap()
2950 s->refcount_table_size * REFTABLE_ENTRY_SIZE)) { in qcow2_check_metadata_overlap()
2955 if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) { in qcow2_check_metadata_overlap()
2956 if (overlaps_with(s->snapshots_offset, s->snapshots_size)) { in qcow2_check_metadata_overlap()
2961 if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) { in qcow2_check_metadata_overlap()
2962 for (i = 0; i < s->nb_snapshots; i++) { in qcow2_check_metadata_overlap()
2963 if (s->snapshots[i].l1_size && in qcow2_check_metadata_overlap()
2964 overlaps_with(s->snapshots[i].l1_table_offset, in qcow2_check_metadata_overlap()
2965 s->snapshots[i].l1_size * L1E_SIZE)) { in qcow2_check_metadata_overlap()
2971 if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) { in qcow2_check_metadata_overlap()
2972 for (i = 0; i < s->l1_size; i++) { in qcow2_check_metadata_overlap()
2973 if ((s->l1_table[i] & L1E_OFFSET_MASK) && in qcow2_check_metadata_overlap()
2974 overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK, in qcow2_check_metadata_overlap()
2975 s->cluster_size)) { in qcow2_check_metadata_overlap()
2981 if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) { in qcow2_check_metadata_overlap()
2982 unsigned last_entry = s->max_refcount_table_index; in qcow2_check_metadata_overlap()
2983 assert(last_entry < s->refcount_table_size); in qcow2_check_metadata_overlap()
2984 assert(last_entry + 1 == s->refcount_table_size || in qcow2_check_metadata_overlap()
2985 (s->refcount_table[last_entry + 1] & REFT_OFFSET_MASK) == 0); in qcow2_check_metadata_overlap()
2987 if ((s->refcount_table[i] & REFT_OFFSET_MASK) && in qcow2_check_metadata_overlap()
2988 overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK, in qcow2_check_metadata_overlap()
2989 s->cluster_size)) { in qcow2_check_metadata_overlap()
2995 if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) { in qcow2_check_metadata_overlap()
2996 for (i = 0; i < s->nb_snapshots; i++) { in qcow2_check_metadata_overlap()
2997 uint64_t l1_ofs = s->snapshots[i].l1_table_offset; in qcow2_check_metadata_overlap()
2998 uint32_t l1_sz = s->snapshots[i].l1_size; in qcow2_check_metadata_overlap()
3012 return -ENOMEM; in qcow2_check_metadata_overlap()
3015 ret = bdrv_pread(bs->file, l1_ofs, l1_sz2, l1, 0); in qcow2_check_metadata_overlap()
3023 if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) { in qcow2_check_metadata_overlap()
3034 (s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) in qcow2_check_metadata_overlap()
3036 if (overlaps_with(s->bitmap_directory_offset, in qcow2_check_metadata_overlap()
3037 s->bitmap_directory_size)) in qcow2_check_metadata_overlap()
3064 * and -EIO returned.
3067 * overlaps; or a negative value (-errno) on error.
3088 return -EIO; in qcow2_pre_write_overlap_check()
3102 * @allocated should be set to true if a new cluster has been allocated.
3121 BDRVQcow2State *s = bs->opaque; in alloc_refblock()
3129 s->cluster_size / REFTABLE_ENTRY_SIZE); in alloc_refblock()
3134 return -ENOTSUP; in alloc_refblock()
3141 return -ENOMEM; in alloc_refblock()
3145 (new_reftable_size - *reftable_size) * REFTABLE_ENTRY_SIZE); in alloc_refblock()
3152 offset = qcow2_alloc_clusters(bs, s->cluster_size); in alloc_refblock()
3154 error_setg_errno(errp, -offset, "Failed to allocate refblock"); in alloc_refblock()
3175 BDRVQcow2State *s = bs->opaque; in flush_refblock()
3182 ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size, in flush_refblock()
3185 error_setg_errno(errp, -ret, "Overlap check failed"); in flush_refblock()
3189 ret = bdrv_pwrite(bs->file, offset, s->cluster_size, refblock, 0); in flush_refblock()
3191 error_setg_errno(errp, -ret, "Failed to write refblock"); in flush_refblock()
3203 * if @new_set_refcount is non-NULL, it is called for every refcount entry to
3208 * @index is the index of the walk_over_reftable() calls and @total is the total
3212 * @allocated is set to true if a new cluster has been allocated.
3223 void *cb_opaque, int index, int total, in walk_over_reftable() argument
3226 BDRVQcow2State *s = bs->opaque; in walk_over_reftable()
3233 for (reftable_index = 0; reftable_index < s->refcount_table_size; in walk_over_reftable()
3236 uint64_t refblock_offset = s->refcount_table[reftable_index] in walk_over_reftable()
3239 status_cb(bs, (uint64_t)index * s->refcount_table_size + reftable_index, in walk_over_reftable()
3240 (uint64_t)total * s->refcount_table_size, cb_opaque); in walk_over_reftable()
3246 qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" in walk_over_reftable()
3247 PRIx64 " unaligned (reftable index: %#" in walk_over_reftable()
3252 return -EIO; in walk_over_reftable()
3255 ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offset, in walk_over_reftable()
3258 error_setg_errno(errp, -ret, "Failed to retrieve refblock"); in walk_over_reftable()
3262 for (refblock_index = 0; refblock_index < s->refcount_block_size; in walk_over_reftable()
3273 qcow2_cache_put(s->refcount_block_cache, &refblock); in walk_over_reftable()
3282 refcount = s->get_refcount(refblock, refblock_index); in walk_over_reftable()
3286 qcow2_cache_put(s->refcount_block_cache, &refblock); in walk_over_reftable()
3288 offset = ((reftable_index << s->refcount_block_bits) in walk_over_reftable()
3289 + refblock_index) << s->cluster_bits; in walk_over_reftable()
3292 "%i bits: Cluster at offset %#" PRIx64 " has a " in walk_over_reftable()
3295 return -EINVAL; in walk_over_reftable()
3307 qcow2_cache_put(s->refcount_block_cache, &refblock); in walk_over_reftable()
3310 for (refblock_index = 0; refblock_index < s->refcount_block_size; in walk_over_reftable()
3356 status_cb(bs, (uint64_t)(index + 1) * s->refcount_table_size, in walk_over_reftable()
3357 (uint64_t)total * s->refcount_table_size, cb_opaque); in walk_over_reftable()
3366 BDRVQcow2State *s = bs->opaque; in qcow2_change_refcount_order()
3369 void *new_refblock = qemu_blockalign(bs->file->bs, s->cluster_size); in qcow2_change_refcount_order()
3381 assert(s->qcow_version >= 3); in qcow2_change_refcount_order()
3385 new_refblock_size = 1 << (s->cluster_bits - (refcount_order - 3)); in qcow2_change_refcount_order()
3427 error_setg_errno(errp, -new_reftable_offset, in qcow2_change_refcount_order()
3454 error_setg_errno(errp, -ret, "Overlap check failed"); in qcow2_change_refcount_order()
3462 ret = bdrv_pwrite(bs->file, new_reftable_offset, in qcow2_change_refcount_order()
3471 error_setg_errno(errp, -ret, "Failed to write the new reftable"); in qcow2_change_refcount_order()
3477 ret = qcow2_cache_flush(bs, s->refcount_block_cache); in qcow2_change_refcount_order()
3479 error_setg_errno(errp, -ret, "Failed to flush the refblock cache"); in qcow2_change_refcount_order()
3485 * such as s->refcount_table or s->refcount_bits stay stale for now in qcow2_change_refcount_order()
3487 old_refcount_order = s->refcount_order; in qcow2_change_refcount_order()
3488 old_reftable_size = s->refcount_table_size; in qcow2_change_refcount_order()
3489 old_reftable_offset = s->refcount_table_offset; in qcow2_change_refcount_order()
3491 s->refcount_order = refcount_order; in qcow2_change_refcount_order()
3492 s->refcount_table_size = new_reftable_size; in qcow2_change_refcount_order()
3493 s->refcount_table_offset = new_reftable_offset; in qcow2_change_refcount_order()
3497 s->refcount_order = old_refcount_order; in qcow2_change_refcount_order()
3498 s->refcount_table_size = old_reftable_size; in qcow2_change_refcount_order()
3499 s->refcount_table_offset = old_reftable_offset; in qcow2_change_refcount_order()
3500 error_setg_errno(errp, -ret, "Failed to update the qcow2 header"); in qcow2_change_refcount_order()
3504 /* Now update the rest of the in-memory information */ in qcow2_change_refcount_order()
3505 old_reftable = s->refcount_table; in qcow2_change_refcount_order()
3506 s->refcount_table = new_reftable; in qcow2_change_refcount_order()
3509 s->refcount_bits = 1 << refcount_order; in qcow2_change_refcount_order()
3510 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); in qcow2_change_refcount_order()
3511 s->refcount_max += s->refcount_max - 1; in qcow2_change_refcount_order()
3513 s->refcount_block_bits = s->cluster_bits - (refcount_order - 3); in qcow2_change_refcount_order()
3514 s->refcount_block_size = 1 << s->refcount_block_bits; in qcow2_change_refcount_order()
3516 s->get_refcount = new_get_refcount; in qcow2_change_refcount_order()
3517 s->set_refcount = new_set_refcount; in qcow2_change_refcount_order()
3533 qcow2_free_clusters(bs, offset, s->cluster_size, in qcow2_change_refcount_order()
3553 BDRVQcow2State *s = bs->opaque; in get_refblock_offset()
3554 uint32_t index = offset_to_reftable_index(s, offset); in get_refblock_offset() local
3557 if (index < s->refcount_table_size) { in get_refblock_offset()
3558 covering_refblock_offset = s->refcount_table[index] & REFT_OFFSET_MASK; in get_refblock_offset()
3561 qcow2_signal_corruption(bs, true, -1, -1, "Refblock at %#" PRIx64 " is " in get_refblock_offset()
3564 return -EIO; in get_refblock_offset()
3573 BDRVQcow2State *s = bs->opaque; in qcow2_discard_refcount_block()
3575 uint64_t cluster_index = discard_block_offs >> s->cluster_bits; in qcow2_discard_refcount_block()
3576 uint32_t block_index = cluster_index & (s->refcount_block_size - 1); in qcow2_discard_refcount_block()
3587 ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offs, in qcow2_discard_refcount_block()
3593 if (s->get_refcount(refblock, block_index) != 1) { in qcow2_discard_refcount_block()
3594 qcow2_signal_corruption(bs, true, -1, -1, "Invalid refcount:" in qcow2_discard_refcount_block()
3596 ", reftable index %u" in qcow2_discard_refcount_block()
3602 s->get_refcount(refblock, block_index)); in qcow2_discard_refcount_block()
3603 qcow2_cache_put(s->refcount_block_cache, &refblock); in qcow2_discard_refcount_block()
3604 return -EINVAL; in qcow2_discard_refcount_block()
3606 s->set_refcount(refblock, block_index, 0); in qcow2_discard_refcount_block()
3608 qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refblock); in qcow2_discard_refcount_block()
3610 qcow2_cache_put(s->refcount_block_cache, &refblock); in qcow2_discard_refcount_block()
3612 if (cluster_index < s->free_cluster_index) { in qcow2_discard_refcount_block()
3613 s->free_cluster_index = cluster_index; in qcow2_discard_refcount_block()
3616 refblock = qcow2_cache_is_table_offset(s->refcount_block_cache, in qcow2_discard_refcount_block()
3620 qcow2_cache_discard(s->refcount_block_cache, refblock); in qcow2_discard_refcount_block()
3622 update_refcount_discard(bs, discard_block_offs, s->cluster_size); in qcow2_discard_refcount_block()
3629 BDRVQcow2State *s = bs->opaque; in qcow2_shrink_reftable()
3631 g_malloc(s->refcount_table_size * REFTABLE_ENTRY_SIZE); in qcow2_shrink_reftable()
3634 for (i = 0; i < s->refcount_table_size; i++) { in qcow2_shrink_reftable()
3635 int64_t refblock_offs = s->refcount_table[i] & REFT_OFFSET_MASK; in qcow2_shrink_reftable()
3643 ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offs, in qcow2_shrink_reftable()
3651 uint64_t block_index = (refblock_offs >> s->cluster_bits) & in qcow2_shrink_reftable()
3652 (s->refcount_block_size - 1); in qcow2_shrink_reftable()
3653 uint64_t refcount = s->get_refcount(refblock, block_index); in qcow2_shrink_reftable()
3655 s->set_refcount(refblock, block_index, 0); in qcow2_shrink_reftable()
3657 unused_block = buffer_is_zero(refblock, s->cluster_size); in qcow2_shrink_reftable()
3659 s->set_refcount(refblock, block_index, refcount); in qcow2_shrink_reftable()
3661 unused_block = buffer_is_zero(refblock, s->cluster_size); in qcow2_shrink_reftable()
3663 qcow2_cache_put(s->refcount_block_cache, &refblock); in qcow2_shrink_reftable()
3665 reftable_tmp[i] = unused_block ? 0 : cpu_to_be64(s->refcount_table[i]); in qcow2_shrink_reftable()
3668 ret = bdrv_co_pwrite_sync(bs->file, s->refcount_table_offset, in qcow2_shrink_reftable()
3669 s->refcount_table_size * REFTABLE_ENTRY_SIZE, in qcow2_shrink_reftable()
3676 for (i = 0; i < s->refcount_table_size; i++) { in qcow2_shrink_reftable()
3677 if (s->refcount_table[i] && !reftable_tmp[i]) { in qcow2_shrink_reftable()
3679 ret = qcow2_discard_refcount_block(bs, s->refcount_table[i] & in qcow2_shrink_reftable()
3682 s->refcount_table[i] = 0; in qcow2_shrink_reftable()
3686 if (!s->cache_discards) { in qcow2_shrink_reftable()
3697 BDRVQcow2State *s = bs->opaque; in qcow2_get_last_cluster()
3700 for (i = size_to_clusters(s, size) - 1; i >= 0; i--) { in qcow2_get_last_cluster()
3704 fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n", in qcow2_get_last_cluster()
3705 i, strerror(-ret)); in qcow2_get_last_cluster()
3712 qcow2_signal_corruption(bs, true, -1, -1, in qcow2_get_last_cluster()
3714 return -EIO; in qcow2_get_last_cluster()
3720 BDRVQcow2State *s = bs->opaque; in qcow2_detect_metadata_preallocation()
3724 qemu_co_mutex_assert_locked(&s->lock); in qcow2_detect_metadata_preallocation()
3726 file_length = bdrv_co_getlength(bs->file->bs); in qcow2_detect_metadata_preallocation()
3731 real_allocation = bdrv_co_get_allocated_file_size(bs->file->bs); in qcow2_detect_metadata_preallocation()
3736 real_clusters = real_allocation / s->cluster_size; in qcow2_detect_metadata_preallocation()