xref: /openbmc/qemu/block/qcow2-cluster.c (revision ec603b55)
1 /*
2  * Block driver for the QCOW version 2 format
3  *
4  * Copyright (c) 2004-2006 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include <zlib.h>
27 
28 #include "qapi/error.h"
29 #include "qemu-common.h"
30 #include "block/block_int.h"
31 #include "block/qcow2.h"
32 #include "qemu/bswap.h"
33 #include "trace.h"
34 
35 int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
36 {
37     BDRVQcow2State *s = bs->opaque;
38     int new_l1_size, i, ret;
39 
40     if (exact_size >= s->l1_size) {
41         return 0;
42     }
43 
44     new_l1_size = exact_size;
45 
46 #ifdef DEBUG_ALLOC2
47     fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
48 #endif
49 
50     BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
51     ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
52                                        new_l1_size * sizeof(uint64_t),
53                              (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
54     if (ret < 0) {
55         goto fail;
56     }
57 
58     ret = bdrv_flush(bs->file->bs);
59     if (ret < 0) {
60         goto fail;
61     }
62 
63     BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
64     for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
65         if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
66             continue;
67         }
68         qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
69                             s->cluster_size, QCOW2_DISCARD_ALWAYS);
70         s->l1_table[i] = 0;
71     }
72     return 0;
73 
74 fail:
75     /*
76      * If the write to the l1_table failed, the image may contain a partially
77      * overwritten l1_table. In this case it is better to clear the l1_table
78      * in memory to avoid possible image corruption.
79      */
80     memset(s->l1_table + new_l1_size, 0,
81            (s->l1_size - new_l1_size) * sizeof(uint64_t));
82     return ret;
83 }
84 
85 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
86                         bool exact_size)
87 {
88     BDRVQcow2State *s = bs->opaque;
89     int new_l1_size2, ret, i;
90     uint64_t *new_l1_table;
91     int64_t old_l1_table_offset, old_l1_size;
92     int64_t new_l1_table_offset, new_l1_size;
93     uint8_t data[12];
94 
95     if (min_size <= s->l1_size)
96         return 0;
97 
98     /* Do a sanity check on min_size before trying to calculate new_l1_size
99      * (this prevents overflows during the while loop for the calculation of
100      * new_l1_size) */
101     if (min_size > INT_MAX / sizeof(uint64_t)) {
102         return -EFBIG;
103     }
104 
105     if (exact_size) {
106         new_l1_size = min_size;
107     } else {
108         /* Bump size up to reduce the number of times we have to grow */
109         new_l1_size = s->l1_size;
110         if (new_l1_size == 0) {
111             new_l1_size = 1;
112         }
113         while (min_size > new_l1_size) {
114             new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
115         }
116     }
117 
118     QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
119     if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
120         return -EFBIG;
121     }
122 
123 #ifdef DEBUG_ALLOC2
124     fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
125             s->l1_size, new_l1_size);
126 #endif
127 
128     new_l1_size2 = sizeof(uint64_t) * new_l1_size;
129     new_l1_table = qemu_try_blockalign(bs->file->bs,
130                                        align_offset(new_l1_size2, 512));
131     if (new_l1_table == NULL) {
132         return -ENOMEM;
133     }
134     memset(new_l1_table, 0, align_offset(new_l1_size2, 512));
135 
136     if (s->l1_size) {
137         memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
138     }
139 
140     /* write new table (align to cluster) */
141     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
142     new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
143     if (new_l1_table_offset < 0) {
144         qemu_vfree(new_l1_table);
145         return new_l1_table_offset;
146     }
147 
148     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
149     if (ret < 0) {
150         goto fail;
151     }
152 
153     /* the L1 position has not yet been updated, so these clusters must
154      * indeed be completely free */
155     ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
156                                         new_l1_size2);
157     if (ret < 0) {
158         goto fail;
159     }
160 
161     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
162     for(i = 0; i < s->l1_size; i++)
163         new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
164     ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
165                            new_l1_table, new_l1_size2);
166     if (ret < 0)
167         goto fail;
168     for(i = 0; i < s->l1_size; i++)
169         new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
170 
171     /* set new table */
172     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
173     stl_be_p(data, new_l1_size);
174     stq_be_p(data + 4, new_l1_table_offset);
175     ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
176                            data, sizeof(data));
177     if (ret < 0) {
178         goto fail;
179     }
180     qemu_vfree(s->l1_table);
181     old_l1_table_offset = s->l1_table_offset;
182     s->l1_table_offset = new_l1_table_offset;
183     s->l1_table = new_l1_table;
184     old_l1_size = s->l1_size;
185     s->l1_size = new_l1_size;
186     qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
187                         QCOW2_DISCARD_OTHER);
188     return 0;
189 fail:
190     qemu_vfree(new_l1_table);
191     qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
192                         QCOW2_DISCARD_OTHER);
193     return ret;
194 }
195 
196 /*
197  * l2_load
198  *
199  * Loads an L2 table into memory. If the table is in the cache, the cache
200  * is used; otherwise the L2 table is loaded from the image file.
201  *
202  * Returns 0 on success and stores a pointer to the L2 table in *l2_table;
203  * returns a negative errno value if reading the table from the image file failed.
204  */
205 
206 static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
207     uint64_t **l2_table)
208 {
209     BDRVQcow2State *s = bs->opaque;
210 
211     return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
212                            (void **)l2_table);
213 }
214 
215 /*
216  * Writes one sector of the L1 table to the disk (we can't update single
217  * entries and we really don't want bdrv_pwrite to perform a read-modify-write)
218  */
219 #define L1_ENTRIES_PER_SECTOR (512 / 8)
220 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
221 {
222     BDRVQcow2State *s = bs->opaque;
223     uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
224     int l1_start_index;
225     int i, ret;
226 
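    /* Round l1_index down to a 64-entry (512-byte) boundary so that the
     * sector-sized buffer below always covers one complete, naturally
     * aligned sector of the L1 table. */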
227     l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
228     for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
229          i++)
230     {
231         buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
232     }
233 
234     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
235             s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
236     if (ret < 0) {
237         return ret;
238     }
239 
240     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
241     ret = bdrv_pwrite_sync(bs->file,
242                            s->l1_table_offset + 8 * l1_start_index,
243                            buf, sizeof(buf));
244     if (ret < 0) {
245         return ret;
246     }
247 
248     return 0;
249 }
250 
251 /*
252  * l2_allocate
253  *
254  * Allocate a new L2 table in the file. If l1_index points to an already
255  * used entry in the L1 table (i.e. we are doing a copy on write for the L2
256  * table), copy the contents of the old L2 table into the newly allocated one.
257  * Otherwise the new table is initialized with zeros.
258  *
259  */
260 
261 static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
262 {
263     BDRVQcow2State *s = bs->opaque;
264     uint64_t old_l2_offset;
265     uint64_t *l2_table = NULL;
266     int64_t l2_offset;
267     int ret;
268 
269     old_l2_offset = s->l1_table[l1_index];
270 
271     trace_qcow2_l2_allocate(bs, l1_index);
272 
273     /* allocate a new l2 entry */
274 
275     l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
276     if (l2_offset < 0) {
277         ret = l2_offset;
278         goto fail;
279     }
280 
281     /* If we're allocating the table at offset 0 then something is wrong */
282     if (l2_offset == 0) {
283         qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
284                                 "allocation of L2 table at offset 0");
285         ret = -EIO;
286         goto fail;
287     }
288 
289     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
290     if (ret < 0) {
291         goto fail;
292     }
293 
294     /* allocate a new entry in the l2 cache */
295 
296     trace_qcow2_l2_allocate_get_empty(bs, l1_index);
297     ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
298     if (ret < 0) {
299         goto fail;
300     }
301 
302     l2_table = *table;
303 
304     if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
305         /* if there was no old l2 table, clear the new table */
306         memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
307     } else {
308         uint64_t* old_table;
309 
310         /* if there was an old l2 table, read it from the disk */
311         BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
312         ret = qcow2_cache_get(bs, s->l2_table_cache,
313             old_l2_offset & L1E_OFFSET_MASK,
314             (void**) &old_table);
315         if (ret < 0) {
316             goto fail;
317         }
318 
319         memcpy(l2_table, old_table, s->cluster_size);
320 
321         qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
322     }
323 
324     /* write the l2 table to the file */
325     BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
326 
327     trace_qcow2_l2_allocate_write_l2(bs, l1_index);
328     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
329     ret = qcow2_cache_flush(bs, s->l2_table_cache);
330     if (ret < 0) {
331         goto fail;
332     }
333 
334     /* update the L1 entry */
335     trace_qcow2_l2_allocate_write_l1(bs, l1_index);
336     s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
337     ret = qcow2_write_l1_entry(bs, l1_index);
338     if (ret < 0) {
339         goto fail;
340     }
341 
342     *table = l2_table;
343     trace_qcow2_l2_allocate_done(bs, l1_index, 0);
344     return 0;
345 
346 fail:
347     trace_qcow2_l2_allocate_done(bs, l1_index, ret);
348     if (l2_table != NULL) {
349         qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
350     }
351     s->l1_table[l1_index] = old_l2_offset;
352     if (l2_offset > 0) {
353         qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
354                             QCOW2_DISCARD_ALWAYS);
355     }
356     return ret;
357 }
358 
359 /*
360  * Checks how many clusters in a given L2 table are contiguous in the image
361  * file. As soon as one of the flags in the bitmask stop_flags changes compared
362  * to the first cluster, the search stops and that cluster is not counted
363  * as contiguous. (This allows the search, for example, to stop at the first
364  * compressed cluster, which may require different handling.)
365  */
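/*
 * For example, if the first n entries of l2_table describe normal clusters
 * laid out back to back in the image file, this returns n; an unallocated
 * entry, a compressed cluster or a change in one of the stop_flags ends the
 * run earlier.
 */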
366 static int count_contiguous_clusters(int nb_clusters, int cluster_size,
367         uint64_t *l2_table, uint64_t stop_flags)
368 {
369     int i;
370     QCow2ClusterType first_cluster_type;
371     uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
372     uint64_t first_entry = be64_to_cpu(l2_table[0]);
373     uint64_t offset = first_entry & mask;
374 
375     if (!offset) {
376         return 0;
377     }
378 
379     /* must be allocated */
380     first_cluster_type = qcow2_get_cluster_type(first_entry);
381     assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
382            first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);
383 
384     for (i = 0; i < nb_clusters; i++) {
385         uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
386         if (offset + (uint64_t) i * cluster_size != l2_entry) {
387             break;
388         }
389     }
390 
391     return i;
392 }
393 
394 /*
395  * Checks how many consecutive clusters in a given L2 table have the given
396  * unallocated cluster type (QCOW2_CLUSTER_ZERO_PLAIN or _UNALLOCATED).
397  */
398 static int count_contiguous_clusters_unallocated(int nb_clusters,
399                                                  uint64_t *l2_table,
400                                                  QCow2ClusterType wanted_type)
401 {
402     int i;
403 
404     assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
405            wanted_type == QCOW2_CLUSTER_UNALLOCATED);
406     for (i = 0; i < nb_clusters; i++) {
407         uint64_t entry = be64_to_cpu(l2_table[i]);
408         QCow2ClusterType type = qcow2_get_cluster_type(entry);
409 
410         if (type != wanted_type) {
411             break;
412         }
413     }
414 
415     return i;
416 }
417 
418 static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
419                                             uint64_t src_cluster_offset,
420                                             unsigned offset_in_cluster,
421                                             QEMUIOVector *qiov)
422 {
423     int ret;
424 
425     if (qiov->size == 0) {
426         return 0;
427     }
428 
429     BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
430 
431     if (!bs->drv) {
432         return -ENOMEDIUM;
433     }
434 
435     /* Call .bdrv_co_preadv() directly instead of using the public block-layer
436      * interface.  This avoids double I/O throttling and request tracking,
437      * which can lead to deadlock when block layer copy-on-read is enabled.
438      */
439     ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
440                                   qiov->size, qiov, 0);
441     if (ret < 0) {
442         return ret;
443     }
444 
445     return 0;
446 }
447 
448 static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
449                                                 uint64_t src_cluster_offset,
450                                                 uint64_t cluster_offset,
451                                                 unsigned offset_in_cluster,
452                                                 uint8_t *buffer,
453                                                 unsigned bytes)
454 {
455     if (bytes && bs->encrypted) {
456         BDRVQcow2State *s = bs->opaque;
457         int64_t offset = (s->crypt_physical_offset ?
458                           (cluster_offset + offset_in_cluster) :
459                           (src_cluster_offset + offset_in_cluster));
460         assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
461         assert((bytes & ~BDRV_SECTOR_MASK) == 0);
462         assert(s->crypto);
463         if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
464             return false;
465         }
466     }
467     return true;
468 }
469 
470 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
471                                              uint64_t cluster_offset,
472                                              unsigned offset_in_cluster,
473                                              QEMUIOVector *qiov)
474 {
475     int ret;
476 
477     if (qiov->size == 0) {
478         return 0;
479     }
480 
481     ret = qcow2_pre_write_overlap_check(bs, 0,
482             cluster_offset + offset_in_cluster, qiov->size);
483     if (ret < 0) {
484         return ret;
485     }
486 
487     BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
488     ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
489                           qiov->size, qiov, 0);
490     if (ret < 0) {
491         return ret;
492     }
493 
494     return 0;
495 }
496 
497 
498 /*
499  * get_cluster_offset
500  *
501  * For a given offset of the virtual disk, find the cluster type and offset in
502  * the qcow2 file. The offset is stored in *cluster_offset.
503  *
504  * On entry, *bytes is the maximum number of contiguous bytes starting at
505  * offset that we are interested in.
506  *
507  * On exit, *bytes is the number of bytes starting at offset that have the same
508  * cluster type and (if applicable) are stored contiguously in the image file.
509  * Compressed clusters are always returned one by one.
510  *
511  * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
512  * cases.
513  */
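/*
 * Illustrative caller (a sketch only, not code from this file; the real read
 * path lives in qcow2.c):
 *
 *     unsigned int cur_bytes = remaining_bytes;
 *     uint64_t host_offset;
 *     int type = qcow2_get_cluster_offset(bs, guest_offset, &cur_bytes,
 *                                         &host_offset);
 *     if (type < 0) {
 *         return type;
 *     }
 *     // cur_bytes now holds how much of the request shares this cluster type
 */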
514 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
515                              unsigned int *bytes, uint64_t *cluster_offset)
516 {
517     BDRVQcow2State *s = bs->opaque;
518     unsigned int l2_index;
519     uint64_t l1_index, l2_offset, *l2_table;
520     int l1_bits, c;
521     unsigned int offset_in_cluster;
522     uint64_t bytes_available, bytes_needed, nb_clusters;
523     QCow2ClusterType type;
524     int ret;
525 
526     offset_in_cluster = offset_into_cluster(s, offset);
527     bytes_needed = (uint64_t) *bytes + offset_in_cluster;
528 
529     l1_bits = s->l2_bits + s->cluster_bits;
530 
531     /* compute how many bytes there are between the start of the cluster
532      * containing offset and the end of the l1 entry */
533     bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
534                     + offset_in_cluster;
535 
536     if (bytes_needed > bytes_available) {
537         bytes_needed = bytes_available;
538     }
539 
540     *cluster_offset = 0;
541 
542     /* seek to the l2 offset in the l1 table */
543 
544     l1_index = offset >> l1_bits;
545     if (l1_index >= s->l1_size) {
546         type = QCOW2_CLUSTER_UNALLOCATED;
547         goto out;
548     }
549 
550     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
551     if (!l2_offset) {
552         type = QCOW2_CLUSTER_UNALLOCATED;
553         goto out;
554     }
555 
556     if (offset_into_cluster(s, l2_offset)) {
557         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
558                                 " unaligned (L1 index: %#" PRIx64 ")",
559                                 l2_offset, l1_index);
560         return -EIO;
561     }
562 
563     /* load the l2 table in memory */
564 
565     ret = l2_load(bs, l2_offset, &l2_table);
566     if (ret < 0) {
567         return ret;
568     }
569 
570     /* find the cluster offset for the given disk offset */
571 
572     l2_index = offset_to_l2_index(s, offset);
573     *cluster_offset = be64_to_cpu(l2_table[l2_index]);
574 
575     nb_clusters = size_to_clusters(s, bytes_needed);
576     /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
577      * integers; the minimum cluster size is 512, so this assertion is always
578      * true */
579     assert(nb_clusters <= INT_MAX);
580 
581     type = qcow2_get_cluster_type(*cluster_offset);
582     if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
583                                 type == QCOW2_CLUSTER_ZERO_ALLOC)) {
584         qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
585                                 " in pre-v3 image (L2 offset: %#" PRIx64
586                                 ", L2 index: %#x)", l2_offset, l2_index);
587         ret = -EIO;
588         goto fail;
589     }
590     switch (type) {
591     case QCOW2_CLUSTER_COMPRESSED:
592         /* Compressed clusters can only be processed one by one */
593         c = 1;
594         *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
595         break;
596     case QCOW2_CLUSTER_ZERO_PLAIN:
597     case QCOW2_CLUSTER_UNALLOCATED:
598         /* how many empty clusters? */
599         c = count_contiguous_clusters_unallocated(nb_clusters,
600                                                   &l2_table[l2_index], type);
601         *cluster_offset = 0;
602         break;
603     case QCOW2_CLUSTER_ZERO_ALLOC:
604     case QCOW2_CLUSTER_NORMAL:
605         /* how many allocated clusters? */
606         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
607                                       &l2_table[l2_index], QCOW_OFLAG_ZERO);
608         *cluster_offset &= L2E_OFFSET_MASK;
609         if (offset_into_cluster(s, *cluster_offset)) {
610             qcow2_signal_corruption(bs, true, -1, -1,
611                                     "Cluster allocation offset %#"
612                                     PRIx64 " unaligned (L2 offset: %#" PRIx64
613                                     ", L2 index: %#x)", *cluster_offset,
614                                     l2_offset, l2_index);
615             ret = -EIO;
616             goto fail;
617         }
618         break;
619     default:
620         abort();
621     }
622 
623     qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
624 
625     bytes_available = (int64_t)c * s->cluster_size;
626 
627 out:
628     if (bytes_available > bytes_needed) {
629         bytes_available = bytes_needed;
630     }
631 
632     /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
633      * subtracting offset_in_cluster will therefore definitely yield something
634      * not exceeding UINT_MAX */
635     assert(bytes_available - offset_in_cluster <= UINT_MAX);
636     *bytes = bytes_available - offset_in_cluster;
637 
638     return type;
639 
640 fail:
641     qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
642     return ret;
643 }
644 
645 /*
646  * get_cluster_table
647  *
648  * For a given disk offset, load (and allocate if needed)
649  * the L2 table.
650  *
651  * A pointer to the cached L2 table and the cluster index
652  * within that table are returned to the caller.
653  *
654  * Returns 0 on success, -errno in failure case
655  */
656 static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
657                              uint64_t **new_l2_table,
658                              int *new_l2_index)
659 {
660     BDRVQcow2State *s = bs->opaque;
661     unsigned int l2_index;
662     uint64_t l1_index, l2_offset;
663     uint64_t *l2_table = NULL;
664     int ret;
665 
666     /* seek to the l2 offset in the l1 table */
667 
668     l1_index = offset >> (s->l2_bits + s->cluster_bits);
669     if (l1_index >= s->l1_size) {
670         ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
671         if (ret < 0) {
672             return ret;
673         }
674     }
675 
676     assert(l1_index < s->l1_size);
677     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
678     if (offset_into_cluster(s, l2_offset)) {
679         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
680                                 " unaligned (L1 index: %#" PRIx64 ")",
681                                 l2_offset, l1_index);
682         return -EIO;
683     }
684 
685     /* seek the l2 table of the given l2 offset */
686 
687     if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
688         /* load the l2 table in memory */
689         ret = l2_load(bs, l2_offset, &l2_table);
690         if (ret < 0) {
691             return ret;
692         }
693     } else {
694         /* First allocate a new L2 table (and do COW if needed) */
695         ret = l2_allocate(bs, l1_index, &l2_table);
696         if (ret < 0) {
697             return ret;
698         }
699 
700         /* Then decrease the refcount of the old table */
701         if (l2_offset) {
702             qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
703                                 QCOW2_DISCARD_OTHER);
704         }
705     }
706 
707     /* find the cluster offset for the given disk offset */
708 
709     l2_index = offset_to_l2_index(s, offset);
710 
711     *new_l2_table = l2_table;
712     *new_l2_index = l2_index;
713 
714     return 0;
715 }
716 
717 /*
718  * alloc_compressed_cluster_offset
719  *
720  * For a given offset on the virtual disk, allocate a new compressed cluster
721  * of compressed_size bytes in the qcow2 file and update its L2 entry.
722  *
723  * Compression can never overwrite existing data, so nothing is allocated
724  * if the cluster is already in use.
725  *
726  * Returns the new (compressed) cluster descriptor on success, or 0 otherwise.
727  *
728  */
729 
730 uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
731                                                uint64_t offset,
732                                                int compressed_size)
733 {
734     BDRVQcow2State *s = bs->opaque;
735     int l2_index, ret;
736     uint64_t *l2_table;
737     int64_t cluster_offset;
738     int nb_csectors;
739 
740     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
741     if (ret < 0) {
742         return 0;
743     }
744 
745     /* Compression can't overwrite anything. Fail if the cluster was already
746      * allocated. */
747     cluster_offset = be64_to_cpu(l2_table[l2_index]);
748     if (cluster_offset & L2E_OFFSET_MASK) {
749         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
750         return 0;
751     }
752 
753     cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
754     if (cluster_offset < 0) {
755         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
756         return 0;
757     }
758 
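    /*
     * Encode the compressed cluster descriptor: the byte offset of the data
     * stays in the low bits, and the field starting at csize_shift holds the
     * number of additional 512-byte sectors spanned beyond the first one
     * (qcow2_decompress_cluster() adds that 1 back when reading).
     */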
759     nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
760                   (cluster_offset >> 9);
761 
762     cluster_offset |= QCOW_OFLAG_COMPRESSED |
763                       ((uint64_t)nb_csectors << s->csize_shift);
764 
765     /* update L2 table */
766 
767     /* compressed clusters never have the copied flag */
768 
769     BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
770     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
771     l2_table[l2_index] = cpu_to_be64(cluster_offset);
772     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
773 
774     return cluster_offset;
775 }
776 
777 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
778 {
779     BDRVQcow2State *s = bs->opaque;
780     Qcow2COWRegion *start = &m->cow_start;
781     Qcow2COWRegion *end = &m->cow_end;
782     unsigned buffer_size;
783     unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
784     bool merge_reads;
785     uint8_t *start_buffer, *end_buffer;
786     QEMUIOVector qiov;
787     int ret;
788 
789     assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
790     assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
791     assert(start->offset + start->nb_bytes <= end->offset);
792     assert(!m->data_qiov || m->data_qiov->size == data_bytes);
793 
794     if (start->nb_bytes == 0 && end->nb_bytes == 0) {
795         return 0;
796     }
797 
798     /* If we have to read both the start and end COW regions and the
799      * middle region is not too large then perform just one read
800      * operation */
801     merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
802     if (merge_reads) {
803         buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
804     } else {
805         /* If we have to do two reads, add some padding in the middle
806          * if necessary to make sure that the end region is optimally
807          * aligned. */
808         size_t align = bdrv_opt_mem_align(bs);
809         assert(align > 0 && align <= UINT_MAX);
810         assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
811                UINT_MAX - end->nb_bytes);
812         buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
813     }
814 
815     /* Reserve a buffer large enough to store all the data that we're
816      * going to read */
817     start_buffer = qemu_try_blockalign(bs, buffer_size);
818     if (start_buffer == NULL) {
819         return -ENOMEM;
820     }
821     /* The part of the buffer where the end region is located */
822     end_buffer = start_buffer + buffer_size - end->nb_bytes;
823 
824     qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));
825 
826     qemu_co_mutex_unlock(&s->lock);
827     /* First we read the existing data from both COW regions. We
828      * either read the whole region in one go, or the start and end
829      * regions separately. */
830     if (merge_reads) {
831         qemu_iovec_add(&qiov, start_buffer, buffer_size);
832         ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
833     } else {
834         qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
835         ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
836         if (ret < 0) {
837             goto fail;
838         }
839 
840         qemu_iovec_reset(&qiov);
841         qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
842         ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
843     }
844     if (ret < 0) {
845         goto fail;
846     }
847 
848     /* Encrypt the data if necessary before writing it */
849     if (bs->encrypted) {
850         if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
851                                     start->offset, start_buffer,
852                                     start->nb_bytes) ||
853             !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
854                                     end->offset, end_buffer, end->nb_bytes)) {
855             ret = -EIO;
856             goto fail;
857         }
858     }
859 
860     /* And now we can write everything. If we have the guest data we
861      * can write everything in one single operation */
862     if (m->data_qiov) {
863         qemu_iovec_reset(&qiov);
864         if (start->nb_bytes) {
865             qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
866         }
867         qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
868         if (end->nb_bytes) {
869             qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
870         }
871         /* NOTE: we have a write_aio blkdebug event here followed by
872          * a cow_write one in do_perform_cow_write(), but there's only
873          * one single I/O operation */
874         BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
875         ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
876     } else {
877         /* If there's no guest data then write both COW regions separately */
878         qemu_iovec_reset(&qiov);
879         qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
880         ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
881         if (ret < 0) {
882             goto fail;
883         }
884 
885         qemu_iovec_reset(&qiov);
886         qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
887         ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
888     }
889 
890 fail:
891     qemu_co_mutex_lock(&s->lock);
892 
893     /*
894      * Before we update the L2 table to actually point to the new cluster, we
895      * need to be sure that the refcounts have been increased and COW was
896      * handled.
897      */
898     if (ret == 0) {
899         qcow2_cache_depends_on_flush(s->l2_table_cache);
900     }
901 
902     qemu_vfree(start_buffer);
903     qemu_iovec_destroy(&qiov);
904     return ret;
905 }
906 
907 int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
908 {
909     BDRVQcow2State *s = bs->opaque;
910     int i, j = 0, l2_index, ret;
911     uint64_t *old_cluster, *l2_table;
912     uint64_t cluster_offset = m->alloc_offset;
913 
914     trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
915     assert(m->nb_clusters > 0);
916 
917     old_cluster = g_try_new(uint64_t, m->nb_clusters);
918     if (old_cluster == NULL) {
919         ret = -ENOMEM;
920         goto err;
921     }
922 
923     /* copy content of unmodified sectors */
924     ret = perform_cow(bs, m);
925     if (ret < 0) {
926         goto err;
927     }
928 
929     /* Update L2 table. */
930     if (s->use_lazy_refcounts) {
931         qcow2_mark_dirty(bs);
932     }
933     if (qcow2_need_accurate_refcounts(s)) {
934         qcow2_cache_set_dependency(bs, s->l2_table_cache,
935                                    s->refcount_block_cache);
936     }
937 
938     ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
939     if (ret < 0) {
940         goto err;
941     }
942     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
943 
944     assert(l2_index + m->nb_clusters <= s->l2_size);
945     for (i = 0; i < m->nb_clusters; i++) {
946         /* If two concurrent writes happen to the same unallocated cluster,
947          * each write allocates a separate cluster and writes data concurrently.
948          * The first one to complete updates the L2 table with a pointer to its
949          * cluster; the second one has to do RMW (which is done above by
950          * perform_cow()), update the L2 table with its cluster pointer and free
951          * the old cluster. This is what this loop does. */
952         if (l2_table[l2_index + i] != 0) {
953             old_cluster[j++] = l2_table[l2_index + i];
954         }
955 
956         l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
957                     (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
958     }
959 
960 
961     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
962 
963     /*
964      * If this was a COW, we need to decrease the refcount of the old cluster.
965      *
966      * Don't discard clusters that reach a refcount of 0 (e.g. compressed
967      * clusters), the next write will reuse them anyway.
968      */
969     if (!m->keep_old_clusters && j != 0) {
970         for (i = 0; i < j; i++) {
971             qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
972                                     QCOW2_DISCARD_NEVER);
973         }
974     }
975 
976     ret = 0;
977 err:
978     g_free(old_cluster);
979     return ret;
980 }
981 
982 /*
983  * Returns the number of contiguous clusters that can be used for an allocating
984  * write but require COW to be performed (this includes not yet allocated
985  * space, which must be copied from the backing file)
986  */
987 static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
988     uint64_t *l2_table, int l2_index)
989 {
990     int i;
991 
992     for (i = 0; i < nb_clusters; i++) {
993         uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
994         QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
995 
996         switch(cluster_type) {
997         case QCOW2_CLUSTER_NORMAL:
998             if (l2_entry & QCOW_OFLAG_COPIED) {
999                 goto out;
1000             }
1001             break;
1002         case QCOW2_CLUSTER_UNALLOCATED:
1003         case QCOW2_CLUSTER_COMPRESSED:
1004         case QCOW2_CLUSTER_ZERO_PLAIN:
1005         case QCOW2_CLUSTER_ZERO_ALLOC:
1006             break;
1007         default:
1008             abort();
1009         }
1010     }
1011 
1012 out:
1013     assert(i <= nb_clusters);
1014     return i;
1015 }
1016 
1017 /*
1018  * Check if there already is an AIO write request in flight which allocates
1019  * the same cluster. In this case we need to wait until the previous
1020  * request has completed and updated the L2 table accordingly.
1021  *
1022  * Returns:
1023  *   0       if there was no dependency. *cur_bytes indicates the number of
1024  *           bytes from guest_offset that can be read before the next
1025  *           dependency must be processed (or the request is complete)
1026  *
1027  *   -EAGAIN if we had to wait for another request, previously gathered
1028  *           information on cluster allocation may be invalid now. The caller
1029  *           must start over anyway, so consider *cur_bytes undefined.
1030  */
1031 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
1032     uint64_t *cur_bytes, QCowL2Meta **m)
1033 {
1034     BDRVQcow2State *s = bs->opaque;
1035     QCowL2Meta *old_alloc;
1036     uint64_t bytes = *cur_bytes;
1037 
1038     QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
1039 
1040         uint64_t start = guest_offset;
1041         uint64_t end = start + bytes;
1042         uint64_t old_start = l2meta_cow_start(old_alloc);
1043         uint64_t old_end = l2meta_cow_end(old_alloc);
1044 
1045         if (end <= old_start || start >= old_end) {
1046             /* No intersection */
1047         } else {
1048             if (start < old_start) {
1049                 /* Stop at the start of a running allocation */
1050                 bytes = old_start - start;
1051             } else {
1052                 bytes = 0;
1053             }
1054 
1055             /* Stop if an l2meta already exists. After yielding, it wouldn't
1056              * be valid any more, so we'd have to clean up the old L2Metas
1057              * and deal with requests depending on them before starting to
1058              * gather new ones. Not worth the trouble. */
1059             if (bytes == 0 && *m) {
1060                 *cur_bytes = 0;
1061                 return 0;
1062             }
1063 
1064             if (bytes == 0) {
1065                 /* Wait for the dependency to complete. We need to recheck
1066                  * the free/allocated clusters when we continue. */
1067                 qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
1068                 return -EAGAIN;
1069             }
1070         }
1071     }
1072 
1073     /* Make sure that existing clusters and new allocations are only used up to
1074      * the next dependency if we shortened the request above */
1075     *cur_bytes = bytes;
1076 
1077     return 0;
1078 }
1079 
1080 /*
1081  * Checks how many clusters that are already allocated and don't require a
1082  * copy on write there are at the given guest_offset (up to *bytes). If
1083  * *host_offset is not zero, only physically contiguous clusters beginning at
1084  * this host offset are counted.
1085  *
1086  * Note that guest_offset may not be cluster aligned. In this case, the
1087  * returned *host_offset points to the exact byte referenced by guest_offset
1088  * and therefore isn't cluster aligned either.
1089  *
1090  * Returns:
1091  *   0:     if no allocated clusters are available at the given offset.
1092  *          *bytes is normally unchanged. It is set to 0 if the cluster
1093  *          is allocated and doesn't need COW, but doesn't have the right
1094  *          physical offset.
1095  *
1096  *   1:     if allocated clusters that don't require a COW are available at
1097  *          the requested offset. *bytes may have decreased and describes
1098  *          the length of the area that can be written to.
1099  *
1100  *  -errno: in error cases
1101  */
1102 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
1103     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1104 {
1105     BDRVQcow2State *s = bs->opaque;
1106     int l2_index;
1107     uint64_t cluster_offset;
1108     uint64_t *l2_table;
1109     uint64_t nb_clusters;
1110     unsigned int keep_clusters;
1111     int ret;
1112 
1113     trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
1114                               *bytes);
1115 
1116     assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
1117                                 == offset_into_cluster(s, *host_offset));
1118 
1119     /*
1120      * Calculate the number of clusters to look for. We stop at L2 table
1121      * boundaries to keep things simple.
1122      */
1123     nb_clusters =
1124         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1125 
1126     l2_index = offset_to_l2_index(s, guest_offset);
1127     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1128     assert(nb_clusters <= INT_MAX);
1129 
1130     /* Find L2 entry for the first involved cluster */
1131     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
1132     if (ret < 0) {
1133         return ret;
1134     }
1135 
1136     cluster_offset = be64_to_cpu(l2_table[l2_index]);
1137 
1138     /* Check how many clusters are already allocated and don't need COW */
1139     if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
1140         && (cluster_offset & QCOW_OFLAG_COPIED))
1141     {
1142         /* If a specific host_offset is required, check it */
1143         bool offset_matches =
1144             (cluster_offset & L2E_OFFSET_MASK) == *host_offset;
1145 
1146         if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
1147             qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
1148                                     "%#llx unaligned (guest offset: %#" PRIx64
1149                                     ")", cluster_offset & L2E_OFFSET_MASK,
1150                                     guest_offset);
1151             ret = -EIO;
1152             goto out;
1153         }
1154 
1155         if (*host_offset != 0 && !offset_matches) {
1156             *bytes = 0;
1157             ret = 0;
1158             goto out;
1159         }
1160 
1161         /* We keep all QCOW_OFLAG_COPIED clusters */
1162         keep_clusters =
1163             count_contiguous_clusters(nb_clusters, s->cluster_size,
1164                                       &l2_table[l2_index],
1165                                       QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
1166         assert(keep_clusters <= nb_clusters);
1167 
1168         *bytes = MIN(*bytes,
1169                  keep_clusters * s->cluster_size
1170                  - offset_into_cluster(s, guest_offset));
1171 
1172         ret = 1;
1173     } else {
1174         ret = 0;
1175     }
1176 
1177     /* Cleanup */
1178 out:
1179     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1180 
1181     /* Only return a host offset if we actually made progress. Otherwise we
1182      * would make requirements for handle_alloc() that it can't fulfill */
1183     if (ret > 0) {
1184         *host_offset = (cluster_offset & L2E_OFFSET_MASK)
1185                      + offset_into_cluster(s, guest_offset);
1186     }
1187 
1188     return ret;
1189 }
1190 
1191 /*
1192  * Allocates new clusters for the given guest_offset.
1193  *
1194  * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
1195  * contain the number of clusters that have been allocated and are contiguous
1196  * in the image file.
1197  *
1198  * If *host_offset is non-zero, it specifies the offset in the image file at
1199  * which the new clusters must start. *nb_clusters can be 0 on return in this
1200  * case if the cluster at host_offset is already in use. If *host_offset is
1201  * zero, the clusters can be allocated anywhere in the image file.
1202  *
1203  * *host_offset is updated to contain the offset into the image file at which
1204  * the first allocated cluster starts.
1205  *
1206  * Return 0 on success and -errno in error cases. -EAGAIN means that the
1207  * function has been waiting for another request and the allocation must be
1208  * restarted, but the whole request should not be failed.
1209  */
1210 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
1211                                    uint64_t *host_offset, uint64_t *nb_clusters)
1212 {
1213     BDRVQcow2State *s = bs->opaque;
1214 
1215     trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
1216                                          *host_offset, *nb_clusters);
1217 
1218     /* Allocate new clusters */
1219     trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
1220     if (*host_offset == 0) {
1221         int64_t cluster_offset =
1222             qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
1223         if (cluster_offset < 0) {
1224             return cluster_offset;
1225         }
1226         *host_offset = cluster_offset;
1227         return 0;
1228     } else {
1229         int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
1230         if (ret < 0) {
1231             return ret;
1232         }
1233         *nb_clusters = ret;
1234         return 0;
1235     }
1236 }
1237 
1238 /*
1239  * Allocates new clusters for an area that either is yet unallocated or needs a
1240  * copy on write. If *host_offset is non-zero, clusters are only allocated if
1241  * the new allocation can match the specified host offset.
1242  *
1243  * Note that guest_offset may not be cluster aligned. In this case, the
1244  * returned *host_offset points to the exact byte referenced by guest_offset
1245  * and therefore isn't cluster aligned either.
1246  *
1247  * Returns:
1248  *   0:     if no clusters could be allocated. *bytes is set to 0,
1249  *          *host_offset is left unchanged.
1250  *
1251  *   1:     if new clusters were allocated. *bytes may be decreased if the
1252  *          new allocation doesn't cover all of the requested area.
1253  *          *host_offset is updated to contain the host offset of the first
1254  *          newly allocated cluster.
1255  *
1256  *  -errno: in error cases
1257  */
1258 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
1259     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1260 {
1261     BDRVQcow2State *s = bs->opaque;
1262     int l2_index;
1263     uint64_t *l2_table;
1264     uint64_t entry;
1265     uint64_t nb_clusters;
1266     int ret;
1267     bool keep_old_clusters = false;
1268 
1269     uint64_t alloc_cluster_offset = 0;
1270 
1271     trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
1272                              *bytes);
1273     assert(*bytes > 0);
1274 
1275     /*
1276      * Calculate the number of clusters to look for. We stop at L2 table
1277      * boundaries to keep things simple.
1278      */
1279     nb_clusters =
1280         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1281 
1282     l2_index = offset_to_l2_index(s, guest_offset);
1283     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1284     assert(nb_clusters <= INT_MAX);
1285 
1286     /* Find L2 entry for the first involved cluster */
1287     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
1288     if (ret < 0) {
1289         return ret;
1290     }
1291 
1292     entry = be64_to_cpu(l2_table[l2_index]);
1293 
1294     /* For the moment, overwrite compressed clusters one by one */
1295     if (entry & QCOW_OFLAG_COMPRESSED) {
1296         nb_clusters = 1;
1297     } else {
1298         nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
1299     }
1300 
1301     /* This function is only called when there were no non-COW clusters, so if
1302      * we can't find any unallocated or COW clusters either, something is
1303      * wrong with our code. */
1304     assert(nb_clusters > 0);
1305 
1306     if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
1307         (entry & QCOW_OFLAG_COPIED) &&
1308         (!*host_offset ||
1309          start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
1310     {
1311         /* Try to reuse preallocated zero clusters; contiguous normal clusters
1312          * would be fine, too, but count_cow_clusters() above has already
1313          * limited nb_clusters to a range of COW clusters */
1314         int preallocated_nb_clusters =
1315             count_contiguous_clusters(nb_clusters, s->cluster_size,
1316                                       &l2_table[l2_index], QCOW_OFLAG_COPIED);
1317         assert(preallocated_nb_clusters > 0);
1318 
1319         nb_clusters = preallocated_nb_clusters;
1320         alloc_cluster_offset = entry & L2E_OFFSET_MASK;
1321 
1322         /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
1323          * should not free them. */
1324         keep_old_clusters = true;
1325     }
1326 
1327     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1328 
1329     if (!alloc_cluster_offset) {
1330         /* Allocate, if necessary at a given offset in the image file */
1331         alloc_cluster_offset = start_of_cluster(s, *host_offset);
1332         ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
1333                                       &nb_clusters);
1334         if (ret < 0) {
1335             goto fail;
1336         }
1337 
1338         /* Can't extend contiguous allocation */
1339         if (nb_clusters == 0) {
1340             *bytes = 0;
1341             return 0;
1342         }
1343 
1344         /* !*host_offset would overwrite the image header and is reserved for
1345          * "no host offset preferred". If 0 was a valid host offset, it'd
1346          * trigger the following overlap check; do that now to avoid having an
1347          * invalid value in *host_offset. */
1348         if (!alloc_cluster_offset) {
1349             ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
1350                                                 nb_clusters * s->cluster_size);
1351             assert(ret < 0);
1352             goto fail;
1353         }
1354     }
1355 
1356     /*
1357      * Save info needed for meta data update.
1358      *
1359      * requested_bytes: Number of bytes from the start of the first
1360      * newly allocated cluster to the end of the (possibly shortened
1361      * before) write request.
1362      *
1363      * avail_bytes: Number of bytes from the start of the first
1364      * newly allocated cluster to the end of the last newly allocated one.
1365      *
1366      * nb_bytes: The number of bytes from the start of the first
1367      * newly allocated cluster to the end of the area that the write
1368      * request actually writes to (excluding COW at the end)
1369      */
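    /*
     * Illustrative numbers (assuming 64 KiB clusters, guest_offset = 0x11000,
     * *bytes = 0x30000 and four newly allocated clusters): requested_bytes is
     * 0x31000, avail_bytes is 0x40000 and nb_bytes is 0x31000, so cow_start
     * below covers 0x1000 bytes and cow_end covers 0xf000 bytes.
     */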
1370     uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
1371     int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
1372     int nb_bytes = MIN(requested_bytes, avail_bytes);
1373     QCowL2Meta *old_m = *m;
1374 
1375     *m = g_malloc0(sizeof(**m));
1376 
1377     **m = (QCowL2Meta) {
1378         .next           = old_m,
1379 
1380         .alloc_offset   = alloc_cluster_offset,
1381         .offset         = start_of_cluster(s, guest_offset),
1382         .nb_clusters    = nb_clusters,
1383 
1384         .keep_old_clusters  = keep_old_clusters,
1385 
1386         .cow_start = {
1387             .offset     = 0,
1388             .nb_bytes   = offset_into_cluster(s, guest_offset),
1389         },
1390         .cow_end = {
1391             .offset     = nb_bytes,
1392             .nb_bytes   = avail_bytes - nb_bytes,
1393         },
1394     };
1395     qemu_co_queue_init(&(*m)->dependent_requests);
1396     QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
1397 
1398     *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
1399     *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
1400     assert(*bytes != 0);
1401 
1402     return 1;
1403 
1404 fail:
1405     if (*m && (*m)->nb_clusters > 0) {
1406         QLIST_REMOVE(*m, next_in_flight);
1407     }
1408     return ret;
1409 }
1410 
1411 /*
1412  * alloc_cluster_offset
1413  *
1414  * For a given offset on the virtual disk, find the cluster offset in the
1415  * qcow2 file. If the offset is not found, allocate a new cluster.
1416  *
1417  * If the cluster was already allocated, m->nb_clusters is set to 0 and
1418  * other fields in m are meaningless.
1419  *
1420  * If the cluster is newly allocated, m->nb_clusters is set to the number of
1421  * contiguous clusters that have been allocated. In this case, the other
1422  * fields of m are valid and contain information about the first allocated
1423  * cluster.
1424  *
1425  * If the request conflicts with another write request in flight, the coroutine
1426  * is queued and will be reentered when the dependency has completed.
1427  *
1428  * Return 0 on success and -errno in error cases
1429  */
1430 int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
1431                                unsigned int *bytes, uint64_t *host_offset,
1432                                QCowL2Meta **m)
1433 {
1434     BDRVQcow2State *s = bs->opaque;
1435     uint64_t start, remaining;
1436     uint64_t cluster_offset;
1437     uint64_t cur_bytes;
1438     int ret;
1439 
1440     trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);
1441 
1442 again:
1443     start = offset;
1444     remaining = *bytes;
1445     cluster_offset = 0;
1446     *host_offset = 0;
1447     cur_bytes = 0;
1448     *m = NULL;
1449 
1450     while (true) {
1451 
1452         if (!*host_offset) {
1453             *host_offset = start_of_cluster(s, cluster_offset);
1454         }
1455 
1456         assert(remaining >= cur_bytes);
1457 
1458         start           += cur_bytes;
1459         remaining       -= cur_bytes;
1460         cluster_offset  += cur_bytes;
1461 
1462         if (remaining == 0) {
1463             break;
1464         }
1465 
1466         cur_bytes = remaining;
1467 
1468         /*
1469          * Now start gathering as many contiguous clusters as possible:
1470          *
1471          * 1. Check for overlaps with in-flight allocations
1472          *
1473          *      a) Overlap not in the first cluster -> shorten this request and
1474          *         let the caller handle the rest in its next loop iteration.
1475          *
1476          *      b) Real overlaps of two requests. Yield and restart the search
1477          *         for contiguous clusters (the situation could have changed
1478          *         while we were sleeping)
1479          *
1480          *      c) TODO: Request starts in the same cluster as the in-flight
1481          *         allocation ends. Shorten the COW of the in-flight allocation,
1482          *         set cluster_offset to write to the same cluster and set up
1483          *         the right synchronisation between the in-flight request and
1484          *         the new one.
1485          */
1486         ret = handle_dependencies(bs, start, &cur_bytes, m);
1487         if (ret == -EAGAIN) {
1488             /* Currently handle_dependencies() doesn't yield if we already had
1489              * an allocation. If it did, we would have to clean up the L2Meta
1490              * structs before starting over. */
1491             assert(*m == NULL);
1492             goto again;
1493         } else if (ret < 0) {
1494             return ret;
1495         } else if (cur_bytes == 0) {
1496             break;
1497         } else {
1498             /* handle_dependencies() may have decreased cur_bytes (shortened
1499              * the allocations below) so that the next dependency is processed
1500              * correctly during the next loop iteration. */
1501         }
1502 
1503         /*
1504          * 2. Count contiguous COPIED clusters.
1505          */
1506         ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
1507         if (ret < 0) {
1508             return ret;
1509         } else if (ret) {
1510             continue;
1511         } else if (cur_bytes == 0) {
1512             break;
1513         }
1514 
1515         /*
1516          * 3. If the request still hasn't completed, allocate new clusters,
1517          *    considering any cluster_offset of steps 1c or 2.
1518          */
1519         ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
1520         if (ret < 0) {
1521             return ret;
1522         } else if (ret) {
1523             continue;
1524         } else {
1525             assert(cur_bytes == 0);
1526             break;
1527         }
1528     }
1529 
1530     *bytes -= remaining;
1531     assert(*bytes > 0);
1532     assert(*host_offset != 0);
1533 
1534     return 0;
1535 }
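/*
 * A condensed, illustrative sketch of how a caller consumes the function
 * above. It mirrors what the write path (qcow2_co_pwritev()) is expected to
 * do, but the function name below is made up for the example, and locking,
 * encryption, COW handling and error unwinding of the QCowL2Meta list are
 * all omitted:
 */
#if 0
/* Illustration only; not compiled. */
static coroutine_fn int pwritev_sketch(BlockDriverState *bs, uint64_t offset,
                                       uint64_t bytes, QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t bytes_done = 0;
    int ret;

    while (bytes != 0) {
        unsigned int cur_bytes = MIN(bytes, INT_MAX);
        uint64_t host_offset;
        QCowL2Meta *l2meta = NULL;
        QEMUIOVector hd_qiov;

        /* Map (or allocate) host clusters for the next chunk; cur_bytes may
         * come back smaller, e.g. when an in-flight allocation is hit. */
        ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
                                         &host_offset, &l2meta);
        if (ret < 0) {
            return ret;
        }

        /* Write the guest data at the returned host cluster offset */
        qemu_iovec_init(&hd_qiov, qiov->niov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
        ret = bdrv_co_pwritev(bs->file,
                              host_offset + offset_into_cluster(s, offset),
                              cur_bytes, &hd_qiov, 0);
        qemu_iovec_destroy(&hd_qiov);
        if (ret < 0) {
            return ret;
        }

        /* Link every allocation made above into its L2 table and wake up
         * requests that were waiting on it */
        while (l2meta != NULL) {
            QCowL2Meta *next = l2meta->next;

            ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
            if (ret < 0) {
                return ret;
            }
            qemu_co_queue_restart_all(&l2meta->dependent_requests);
            g_free(l2meta);
            l2meta = next;
        }

        bytes -= cur_bytes;
        offset += cur_bytes;
        bytes_done += cur_bytes;
    }
    return 0;
}
#endif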
1536 
1537 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
1538                              const uint8_t *buf, int buf_size)
1539 {
1540     z_stream strm1, *strm = &strm1;
1541     int ret, out_len;
1542 
1543     memset(strm, 0, sizeof(*strm));
1544 
1545     strm->next_in = (uint8_t *)buf;
1546     strm->avail_in = buf_size;
1547     strm->next_out = out_buf;
1548     strm->avail_out = out_buf_size;
1549 
1550     ret = inflateInit2(strm, -12);
1551     if (ret != Z_OK)
1552         return -1;
1553     ret = inflate(strm, Z_FINISH);
1554     out_len = strm->next_out - out_buf;
1555     if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
1556         out_len != out_buf_size) {
1557         inflateEnd(strm);
1558         return -1;
1559     }
1560     inflateEnd(strm);
1561     return 0;
1562 }
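/*
 * The -12 passed to inflateInit2() above selects raw deflate (no zlib header
 * or checksum) with a 2^12-byte window. Below is a minimal sketch of a
 * compressor producing data decompress_buffer() can decode; the function
 * name is made up for illustration, and the real compression is done
 * elsewhere (in the compressed-write path), which is expected to use
 * matching parameters:
 */
#if 0
/* Illustration only; not compiled. */
static int compress_cluster_sketch(uint8_t *dst, int dst_size,
                                   const uint8_t *src, int src_size)
{
    z_stream strm;
    int out_len;

    memset(&strm, 0, sizeof(strm));
    if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     -12, 9, Z_DEFAULT_STRATEGY) != Z_OK) {
        return -1;
    }

    strm.next_in = (uint8_t *)src;
    strm.avail_in = src_size;
    strm.next_out = dst;
    strm.avail_out = dst_size;

    /* Z_STREAM_END means the whole input fit into dst; anything else means
     * the data is not compressible enough and the cluster would be written
     * uncompressed instead. */
    if (deflate(&strm, Z_FINISH) != Z_STREAM_END) {
        deflateEnd(&strm);
        return -1;
    }
    out_len = strm.next_out - dst;
    deflateEnd(&strm);
    return out_len;
}
#endif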
1563 
1564 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
1565 {
1566     BDRVQcow2State *s = bs->opaque;
1567     int ret, csize, nb_csectors, sector_offset;
1568     uint64_t coffset;
1569 
1570     coffset = cluster_offset & s->cluster_offset_mask;
1571     if (s->cluster_cache_offset != coffset) {
1572         nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
1573         sector_offset = coffset & 511;
1574         csize = nb_csectors * 512 - sector_offset;
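        /*
         * Worked example (illustrative, for the common 64 KiB cluster size,
         * where csize_shift is 54 and csize_mask is 0xff): with
         * coffset = 0x50280 and a sector-count field of 2, nb_csectors = 3,
         * sector_offset = 0x80, and csize = 3 * 512 - 0x80 = 1408 bytes.
         * The read below then fetches sectors 0x281..0x283 and the
         * compressed stream starts 0x80 bytes into the first one.
         */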
1575 
1576         /* Allocate buffers on the first decompress operation; most images
1577          * are uncompressed, so the memory overhead can often be avoided.
1578          * The buffers are freed in .bdrv_close().
1579          */
1580         if (!s->cluster_data) {
1581             /* one more sector for decompressed data alignment */
1582             s->cluster_data = qemu_try_blockalign(bs->file->bs,
1583                     QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
1584             if (!s->cluster_data) {
1585                 return -ENOMEM;
1586             }
1587         }
1588         if (!s->cluster_cache) {
1589             s->cluster_cache = g_malloc(s->cluster_size);
1590         }
1591 
1592         BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
1593         ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
1594                         nb_csectors);
1595         if (ret < 0) {
1596             return ret;
1597         }
1598         if (decompress_buffer(s->cluster_cache, s->cluster_size,
1599                               s->cluster_data + sector_offset, csize) < 0) {
1600             return -EIO;
1601         }
1602         s->cluster_cache_offset = coffset;
1603     }
1604     return 0;
1605 }
1606 
1607 /*
1608  * This discards as many clusters of nb_clusters as possible at once (i.e.
1609  * This discards as many of the nb_clusters clusters as possible at once
1610  * (i.e. all clusters that share the same L2 table) and returns the number
1611  * of discarded clusters.
1612 static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
1613                              uint64_t nb_clusters, enum qcow2_discard_type type,
1614                              bool full_discard)
1615 {
1616     BDRVQcow2State *s = bs->opaque;
1617     uint64_t *l2_table;
1618     int l2_index;
1619     int ret;
1620     int i;
1621 
1622     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1623     if (ret < 0) {
1624         return ret;
1625     }
1626 
1627     /* Limit nb_clusters to one L2 table */
1628     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1629     assert(nb_clusters <= INT_MAX);
1630 
1631     for (i = 0; i < nb_clusters; i++) {
1632         uint64_t old_l2_entry;
1633 
1634         old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);
1635 
1636         /*
1637          * If full_discard is false, make sure that a discarded area reads back
1638          * as zeroes for v3 images (we cannot do it for v2 without actually
1639          * writing a zero-filled buffer). We can skip the operation if the
1640          * cluster is already marked as zero, or if it's unallocated and we
1641          * don't have a backing file.
1642          *
1643          * TODO We might want to use bdrv_block_status(bs) here, but we're
1644          * holding s->lock, so that doesn't work today.
1645          *
1646          * If full_discard is true, the discarded area should not read back as zeroes,
1647          * but rather fall through to the backing file.
1648          */
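        /*
         * Net effect on the L2 entry (illustrative summary): with
         * full_discard the entry becomes 0 and the guest sees the backing
         * file (if any) again; without it, a v3 image gets QCOW_OFLAG_ZERO
         * and reads back as zeroes, while a v2 image can only drop the
         * mapping (it has no zero flag), so the data may read from the
         * backing file.
         */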
1649         switch (qcow2_get_cluster_type(old_l2_entry)) {
1650         case QCOW2_CLUSTER_UNALLOCATED:
1651             if (full_discard || !bs->backing) {
1652                 continue;
1653             }
1654             break;
1655 
1656         case QCOW2_CLUSTER_ZERO_PLAIN:
1657             if (!full_discard) {
1658                 continue;
1659             }
1660             break;
1661 
1662         case QCOW2_CLUSTER_ZERO_ALLOC:
1663         case QCOW2_CLUSTER_NORMAL:
1664         case QCOW2_CLUSTER_COMPRESSED:
1665             break;
1666 
1667         default:
1668             abort();
1669         }
1670 
1671         /* First remove L2 entries */
1672         qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1673         if (!full_discard && s->qcow_version >= 3) {
1674             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1675         } else {
1676             l2_table[l2_index + i] = cpu_to_be64(0);
1677         }
1678 
1679         /* Then decrease the refcount */
1680         qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
1681     }
1682 
1683     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1684 
1685     return nb_clusters;
1686 }
1687 
1688 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
1689                           uint64_t bytes, enum qcow2_discard_type type,
1690                           bool full_discard)
1691 {
1692     BDRVQcow2State *s = bs->opaque;
1693     uint64_t end_offset = offset + bytes;
1694     uint64_t nb_clusters;
1695     int64_t cleared;
1696     int ret;
1697 
1698     /* Caller must pass aligned values, except at image end */
1699     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1700     assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1701            end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1702 
1703     nb_clusters = size_to_clusters(s, bytes);
1704 
1705     s->cache_discards = true;
1706 
1707     /* Each L2 table is handled by its own loop iteration */
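    /*
     * Worked example (illustrative, 64 KiB clusters): one L2 table holds
     * cluster_size / 8 = 8192 entries and thus maps 512 MiB, so an aligned
     * 1 GiB discard completes in two calls to discard_single_l2(); a start
     * that is not aligned to an L2-table boundary simply makes the first
     * call return fewer clusters.
     */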
1708     while (nb_clusters > 0) {
1709         cleared = discard_single_l2(bs, offset, nb_clusters, type,
1710                                     full_discard);
1711         if (cleared < 0) {
1712             ret = cleared;
1713             goto fail;
1714         }
1715 
1716         nb_clusters -= cleared;
1717         offset += (cleared * s->cluster_size);
1718     }
1719 
1720     ret = 0;
1721 fail:
1722     s->cache_discards = false;
1723     qcow2_process_discards(bs, ret);
1724 
1725     return ret;
1726 }
1727 
1728 /*
1729  * This zeroes as many of the nb_clusters clusters as possible at once
1730  * (i.e. all clusters that share the same L2 table) and returns the number
1731  * of zeroed clusters.
1732  */
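/*
 * Per-entry effect (illustrative summary of the loop below): clusters that
 * already read as zeroes with an acceptable allocation status are left
 * alone; compressed clusters and BDRV_REQ_MAY_UNMAP requests turn the entry
 * into a plain zero cluster (QCOW_OFLAG_ZERO only) and free the old data;
 * otherwise the zero flag is simply OR-ed into the existing mapping, keeping
 * the allocation (if any).
 */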
1733 static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
1734                           uint64_t nb_clusters, int flags)
1735 {
1736     BDRVQcow2State *s = bs->opaque;
1737     uint64_t *l2_table;
1738     int l2_index;
1739     int ret;
1740     int i;
1741     bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);
1742 
1743     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1744     if (ret < 0) {
1745         return ret;
1746     }
1747 
1748     /* Limit nb_clusters to one L2 table */
1749     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1750     assert(nb_clusters <= INT_MAX);
1751 
1752     for (i = 0; i < nb_clusters; i++) {
1753         uint64_t old_offset;
1754         QCow2ClusterType cluster_type;
1755 
1756         old_offset = be64_to_cpu(l2_table[l2_index + i]);
1757 
1758         /*
1759          * Minimize L2 changes if the cluster already reads back as
1760          * zeroes with correct allocation.
1761          */
1762         cluster_type = qcow2_get_cluster_type(old_offset);
1763         if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
1764             (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
1765             continue;
1766         }
1767 
1768         qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1769         if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
1770             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1771             qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
1772         } else {
1773             l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
1774         }
1775     }
1776 
1777     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1778 
1779     return nb_clusters;
1780 }
1781 
1782 int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
1783                           uint64_t bytes, int flags)
1784 {
1785     BDRVQcow2State *s = bs->opaque;
1786     uint64_t end_offset = offset + bytes;
1787     uint64_t nb_clusters;
1788     int64_t cleared;
1789     int ret;
1790 
1791     /* Caller must pass aligned values, except at image end */
1792     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1793     assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1794            end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1795 
1796     /* The zero flag is only supported by version 3 and newer */
1797     if (s->qcow_version < 3) {
1798         return -ENOTSUP;
1799     }
1800 
1801     /* Each L2 table is handled by its own loop iteration */
1802     nb_clusters = size_to_clusters(s, bytes);
1803 
1804     s->cache_discards = true;
1805 
1806     while (nb_clusters > 0) {
1807         cleared = zero_single_l2(bs, offset, nb_clusters, flags);
1808         if (cleared < 0) {
1809             ret = cleared;
1810             goto fail;
1811         }
1812 
1813         nb_clusters -= cleared;
1814         offset += (cleared * s->cluster_size);
1815     }
1816 
1817     ret = 0;
1818 fail:
1819     s->cache_discards = false;
1820     qcow2_process_discards(bs, ret);
1821 
1822     return ret;
1823 }
1824 
1825 /*
1826  * Expands all zero clusters in a specific L1 table (or deallocates them, for
1827  * non-backed non-pre-allocated zero clusters).
1828  *
1829  * l1_entries and *visited_l1_entries are used to keep track of progress for
1830  * status_cb(). l1_entries contains the total number of L1 entries and
1831  * *visited_l1_entries counts all visited L1 entries.
1832  */
1833 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
1834                                       int l1_size, int64_t *visited_l1_entries,
1835                                       int64_t l1_entries,
1836                                       BlockDriverAmendStatusCB *status_cb,
1837                                       void *cb_opaque)
1838 {
1839     BDRVQcow2State *s = bs->opaque;
1840     bool is_active_l1 = (l1_table == s->l1_table);
1841     uint64_t *l2_table = NULL;
1842     int ret;
1843     int i, j;
1844 
1845     if (!is_active_l1) {
1846         /* inactive L2 tables require a local buffer to read them into when
1847          * loading them from disk */
1848         l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
1849         if (l2_table == NULL) {
1850             return -ENOMEM;
1851         }
1852     }
1853 
1854     for (i = 0; i < l1_size; i++) {
1855         uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
1856         bool l2_dirty = false;
1857         uint64_t l2_refcount;
1858 
1859         if (!l2_offset) {
1860             /* unallocated */
1861             (*visited_l1_entries)++;
1862             if (status_cb) {
1863                 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
1864             }
1865             continue;
1866         }
1867 
1868         if (offset_into_cluster(s, l2_offset)) {
1869             qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
1870                                     PRIx64 " unaligned (L1 index: %#x)",
1871                                     l2_offset, i);
1872             ret = -EIO;
1873             goto fail;
1874         }
1875 
1876         if (is_active_l1) {
1877             /* get active L2 tables from cache */
1878             ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
1879                     (void **)&l2_table);
1880         } else {
1881             /* load inactive L2 tables from disk */
1882             ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
1883                             (void *)l2_table, s->cluster_sectors);
1884         }
1885         if (ret < 0) {
1886             goto fail;
1887         }
1888 
1889         ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
1890                                  &l2_refcount);
1891         if (ret < 0) {
1892             goto fail;
1893         }
1894 
1895         for (j = 0; j < s->l2_size; j++) {
1896             uint64_t l2_entry = be64_to_cpu(l2_table[j]);
1897             int64_t offset = l2_entry & L2E_OFFSET_MASK;
1898             QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
1899 
1900             if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
1901                 cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
1902                 continue;
1903             }
1904 
1905             if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1906                 if (!bs->backing) {
1907                     /* not backed; therefore we can simply deallocate the
1908                      * cluster */
1909                     l2_table[j] = 0;
1910                     l2_dirty = true;
1911                     continue;
1912                 }
1913 
1914                 offset = qcow2_alloc_clusters(bs, s->cluster_size);
1915                 if (offset < 0) {
1916                     ret = offset;
1917                     goto fail;
1918                 }
1919 
1920                 if (l2_refcount > 1) {
1921                     /* For shared L2 tables, set the refcount accordingly (it is
1922                      * already 1 and needs to be l2_refcount) */
1923                     ret = qcow2_update_cluster_refcount(bs,
1924                             offset >> s->cluster_bits,
1925                             refcount_diff(1, l2_refcount), false,
1926                             QCOW2_DISCARD_OTHER);
1927                     if (ret < 0) {
1928                         qcow2_free_clusters(bs, offset, s->cluster_size,
1929                                             QCOW2_DISCARD_OTHER);
1930                         goto fail;
1931                     }
1932                 }
1933             }
1934 
1935             if (offset_into_cluster(s, offset)) {
1936                 qcow2_signal_corruption(bs, true, -1, -1,
1937                                         "Cluster allocation offset "
1938                                         "%#" PRIx64 " unaligned (L2 offset: %#"
1939                                         PRIx64 ", L2 index: %#x)", offset,
1940                                         l2_offset, j);
1941                 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1942                     qcow2_free_clusters(bs, offset, s->cluster_size,
1943                                         QCOW2_DISCARD_ALWAYS);
1944                 }
1945                 ret = -EIO;
1946                 goto fail;
1947             }
1948 
1949             ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
1950             if (ret < 0) {
1951                 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1952                     qcow2_free_clusters(bs, offset, s->cluster_size,
1953                                         QCOW2_DISCARD_ALWAYS);
1954                 }
1955                 goto fail;
1956             }
1957 
1958             ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
1959             if (ret < 0) {
1960                 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1961                     qcow2_free_clusters(bs, offset, s->cluster_size,
1962                                         QCOW2_DISCARD_ALWAYS);
1963                 }
1964                 goto fail;
1965             }
1966 
1967             if (l2_refcount == 1) {
1968                 l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
1969             } else {
1970                 l2_table[j] = cpu_to_be64(offset);
1971             }
1972             l2_dirty = true;
1973         }
1974 
1975         if (is_active_l1) {
1976             if (l2_dirty) {
1977                 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1978                 qcow2_cache_depends_on_flush(s->l2_table_cache);
1979             }
1980             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1981         } else {
1982             if (l2_dirty) {
1983                 ret = qcow2_pre_write_overlap_check(bs,
1984                         QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
1985                         s->cluster_size);
1986                 if (ret < 0) {
1987                     goto fail;
1988                 }
1989 
1990                 ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
1991                                  (void *)l2_table, s->cluster_sectors);
1992                 if (ret < 0) {
1993                     goto fail;
1994                 }
1995             }
1996         }
1997 
1998         (*visited_l1_entries)++;
1999         if (status_cb) {
2000             status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
2001         }
2002     }
2003 
2004     ret = 0;
2005 
2006 fail:
2007     if (l2_table) {
2008         if (!is_active_l1) {
2009             qemu_vfree(l2_table);
2010         } else {
2011             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
2012         }
2013     }
2014     return ret;
2015 }
2016 
2017 /*
2018  * For backed images, expands all zero clusters on the image. For non-backed
2019  * images, deallocates all non-pre-allocated zero clusters (and claims the
2020  * allocation for pre-allocated ones). This is important for downgrading to a
2021  * qcow2 version which doesn't yet support metadata zero clusters.
2022  */
2023 int qcow2_expand_zero_clusters(BlockDriverState *bs,
2024                                BlockDriverAmendStatusCB *status_cb,
2025                                void *cb_opaque)
2026 {
2027     BDRVQcow2State *s = bs->opaque;
2028     uint64_t *l1_table = NULL;
2029     int64_t l1_entries = 0, visited_l1_entries = 0;
2030     int ret;
2031     int i, j;
2032 
2033     if (status_cb) {
2034         l1_entries = s->l1_size;
2035         for (i = 0; i < s->nb_snapshots; i++) {
2036             l1_entries += s->snapshots[i].l1_size;
2037         }
2038     }
2039 
2040     ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
2041                                      &visited_l1_entries, l1_entries,
2042                                      status_cb, cb_opaque);
2043     if (ret < 0) {
2044         goto fail;
2045     }
2046 
2047     /* Inactive L1 tables may point to active L2 tables - therefore it is
2048      * necessary to flush the L2 table cache before trying to access the L2
2049      * tables pointed to by inactive L1 entries (else we might try to expand
2050      * zero clusters that have already been expanded); furthermore, it is also
2051      * necessary to empty the L2 table cache, since it may contain tables which
2052      * are now going to be modified directly on disk, bypassing the cache.
2053      * qcow2_cache_empty() does both for us. */
2054     ret = qcow2_cache_empty(bs, s->l2_table_cache);
2055     if (ret < 0) {
2056         goto fail;
2057     }
2058 
2059     for (i = 0; i < s->nb_snapshots; i++) {
2060         int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
2061                                       sizeof(uint64_t), BDRV_SECTOR_SIZE);
2062 
2063         l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);
2064 
2065         ret = bdrv_read(bs->file,
2066                         s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
2067                         (void *)l1_table, l1_sectors);
2068         if (ret < 0) {
2069             goto fail;
2070         }
2071 
2072         for (j = 0; j < s->snapshots[i].l1_size; j++) {
2073             be64_to_cpus(&l1_table[j]);
2074         }
2075 
2076         ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
2077                                          &visited_l1_entries, l1_entries,
2078                                          status_cb, cb_opaque);
2079         if (ret < 0) {
2080             goto fail;
2081         }
2082     }
2083 
2084     ret = 0;
2085 
2086 fail:
2087     g_free(l1_table);
2088     return ret;
2089 }
2090
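/*
 * Illustrative sketch of driving qcow2_expand_zero_clusters() with a
 * progress callback, roughly what the amend/downgrade path is expected to
 * do. The callback follows the BlockDriverAmendStatusCB signature; the
 * function names below are made up for the example, and the locking and
 * error handling around the call are omitted:
 */
#if 0
/* Illustration only; not compiled. */
static void expand_status_sketch(BlockDriverState *bs, int64_t offset,
                                 int64_t total_work_size, void *opaque)
{
    /* offset counts visited L1 entries, total_work_size the overall number */
    fprintf(stderr, "zero cluster expansion: %" PRId64 "/%" PRId64 "\n",
            offset, total_work_size);
}

static int expand_before_downgrade_sketch(BlockDriverState *bs)
{
    /* cb_opaque is NULL because expand_status_sketch() ignores it */
    return qcow2_expand_zero_clusters(bs, expand_status_sketch, NULL);
}
#endif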