xref: /openbmc/qemu/block/qcow2-cluster.c (revision b11ce33f)
1 /*
2  * Block driver for the QCOW version 2 format
3  *
4  * Copyright (c) 2004-2006 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include <zlib.h>
27 
28 #include "qapi/error.h"
29 #include "qemu-common.h"
30 #include "block/block_int.h"
31 #include "block/qcow2.h"
32 #include "qemu/bswap.h"
33 #include "trace.h"
34 
35 int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
36 {
37     BDRVQcow2State *s = bs->opaque;
38     int new_l1_size, i, ret;
39 
40     if (exact_size >= s->l1_size) {
41         return 0;
42     }
43 
44     new_l1_size = exact_size;
45 
46 #ifdef DEBUG_ALLOC2
47     fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
48 #endif
49 
50     BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
51     ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
52                                        new_l1_size * sizeof(uint64_t),
53                              (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
54     if (ret < 0) {
55         goto fail;
56     }
57 
58     ret = bdrv_flush(bs->file->bs);
59     if (ret < 0) {
60         goto fail;
61     }
62 
63     BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
64     for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
65         if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
66             continue;
67         }
68         qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
69                             s->cluster_size, QCOW2_DISCARD_ALWAYS);
70         s->l1_table[i] = 0;
71     }
72     return 0;
73 
74 fail:
75     /*
76      * If the write to the l1_table failed, the image may contain a partially
77      * overwritten l1_table. In this case it is better to clear the
78      * l1_table in memory to avoid possible image corruption.
79      */
80     memset(s->l1_table + new_l1_size, 0,
81            (s->l1_size - new_l1_size) * sizeof(uint64_t));
82     return ret;
83 }
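
/*
 * Worked example of the shrink path above (8-byte L1 entries): shrinking
 * l1_size from 8 to exact_size = 3 first zeroes the on-disk entries 3..7,
 * i.e. (8 - 3) * 8 = 40 bytes starting at l1_table_offset + 3 * 8, flushes
 * the file, and then walks i = 7 down to 3, freeing each referenced L2
 * table cluster and clearing the in-memory entry.
 */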
84 
85 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
86                         bool exact_size)
87 {
88     BDRVQcow2State *s = bs->opaque;
89     int new_l1_size2, ret, i;
90     uint64_t *new_l1_table;
91     int64_t old_l1_table_offset, old_l1_size;
92     int64_t new_l1_table_offset, new_l1_size;
93     uint8_t data[12];
94 
95     if (min_size <= s->l1_size)
96         return 0;
97 
98     /* Do a sanity check on min_size before trying to calculate new_l1_size
99      * (this prevents overflows during the while loop for the calculation of
100      * new_l1_size) */
101     if (min_size > INT_MAX / sizeof(uint64_t)) {
102         return -EFBIG;
103     }
104 
105     if (exact_size) {
106         new_l1_size = min_size;
107     } else {
108         /* Bump size up to reduce the number of times we have to grow */
109         new_l1_size = s->l1_size;
110         if (new_l1_size == 0) {
111             new_l1_size = 1;
112         }
113         while (min_size > new_l1_size) {
114             new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
115         }
116     }
117 
118     QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
119     if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
120         return -EFBIG;
121     }
122 
123 #ifdef DEBUG_ALLOC2
124     fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
125             s->l1_size, new_l1_size);
126 #endif
127 
128     new_l1_size2 = sizeof(uint64_t) * new_l1_size;
129     new_l1_table = qemu_try_blockalign(bs->file->bs,
130                                        align_offset(new_l1_size2, 512));
131     if (new_l1_table == NULL) {
132         return -ENOMEM;
133     }
134     memset(new_l1_table, 0, align_offset(new_l1_size2, 512));
135 
136     if (s->l1_size) {
137         memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
138     }
139 
140     /* write new table (align to cluster) */
141     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
142     new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
143     if (new_l1_table_offset < 0) {
144         qemu_vfree(new_l1_table);
145         return new_l1_table_offset;
146     }
147 
148     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
149     if (ret < 0) {
150         goto fail;
151     }
152 
153     /* the L1 position has not yet been updated, so these clusters must
154      * indeed be completely free */
155     ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
156                                         new_l1_size2);
157     if (ret < 0) {
158         goto fail;
159     }
160 
161     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
162     for(i = 0; i < s->l1_size; i++)
163         new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
164     ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
165                            new_l1_table, new_l1_size2);
166     if (ret < 0)
167         goto fail;
168     for(i = 0; i < s->l1_size; i++)
169         new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
170 
171     /* set new table */
172     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
173     stl_be_p(data, new_l1_size);
174     stq_be_p(data + 4, new_l1_table_offset);
175     ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
176                            data, sizeof(data));
177     if (ret < 0) {
178         goto fail;
179     }
180     qemu_vfree(s->l1_table);
181     old_l1_table_offset = s->l1_table_offset;
182     s->l1_table_offset = new_l1_table_offset;
183     s->l1_table = new_l1_table;
184     old_l1_size = s->l1_size;
185     s->l1_size = new_l1_size;
186     qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
187                         QCOW2_DISCARD_OTHER);
188     return 0;
189 fail:
190     qemu_vfree(new_l1_table);
191     qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
192                         QCOW2_DISCARD_OTHER);
193     return ret;
194 }
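
/*
 * Sizing example for the growth above: with exact_size == false the L1 size
 * grows by roughly 3/2 per step, e.g. 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 ...
 * until it reaches min_size.  The final header update writes 12 bytes at
 * offsetof(QCowHeader, l1_size): a 4-byte big-endian l1_size immediately
 * followed by the 8-byte big-endian l1_table_offset, which switches the
 * image over to the new table in a single write.
 */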
195 
196 /*
197  * l2_load
198  *
199  * Loads an L2 table into memory. If the table is in the cache, the cache
200  * is used; otherwise the L2 table is loaded from the image file.
201  *
202  * Returns 0 on success, -errno on failure. On success, *l2_table points to
203  * the L2 table, as a reference into the L2 table cache.
204  */
205 
206 static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
207     uint64_t **l2_table)
208 {
209     BDRVQcow2State *s = bs->opaque;
210 
211     return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
212                            (void **)l2_table);
213 }
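
/*
 * Minimal usage sketch for l2_load(): the returned table is a reference into
 * the L2 table cache and must be released with qcow2_cache_put() after use.
 * The helper below is purely illustrative (hypothetical name); the real
 * callers are the functions further down in this file.
 */
static inline int example_read_l2_entry(BlockDriverState *bs,
                                        uint64_t l2_offset, int l2_index,
                                        uint64_t *entry)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int ret;

    ret = l2_load(bs, l2_offset, &l2_table);    /* takes a cache reference */
    if (ret < 0) {
        return ret;
    }
    *entry = be64_to_cpu(l2_table[l2_index]);   /* entries are big-endian */
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table); /* drop ref */
    return 0;
}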
214 
215 /*
216  * Writes one sector of the L1 table to the disk (we can't update single
217  * entries, and we really don't want bdrv_pwrite to perform a read-modify-write)
218  */
219 #define L1_ENTRIES_PER_SECTOR (512 / 8)
220 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
221 {
222     BDRVQcow2State *s = bs->opaque;
223     uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
224     int l1_start_index;
225     int i, ret;
226 
227     l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
228     for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
229          i++)
230     {
231         buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
232     }
233 
234     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
235             s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
236     if (ret < 0) {
237         return ret;
238     }
239 
240     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
241     ret = bdrv_pwrite_sync(bs->file,
242                            s->l1_table_offset + 8 * l1_start_index,
243                            buf, sizeof(buf));
244     if (ret < 0) {
245         return ret;
246     }
247 
248     return 0;
249 }
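
/*
 * Example of the sector rounding above: with L1_ENTRIES_PER_SECTOR == 64, an
 * update of l1_index 70 rounds down to l1_start_index 64, so entries 64..127
 * (clamped to l1_size) are converted to big-endian and the whole 512-byte
 * sector at l1_table_offset + 8 * 64 is rewritten by one bdrv_pwrite_sync()
 * call.
 */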
250 
251 /*
252  * l2_allocate
253  *
254  * Allocates a new L2 table in the image file. If l1_index points to an
255  * already used entry in the L1 table (i.e. we are doing a copy on write for
256  * the L2 table), copy the contents of the old L2 table into the new one.
257  * Otherwise the new table is initialized with zeros.
258  *
259  */
260 
261 static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
262 {
263     BDRVQcow2State *s = bs->opaque;
264     uint64_t old_l2_offset;
265     uint64_t *l2_table = NULL;
266     int64_t l2_offset;
267     int ret;
268 
269     old_l2_offset = s->l1_table[l1_index];
270 
271     trace_qcow2_l2_allocate(bs, l1_index);
272 
273     /* allocate a new l2 entry */
274 
275     l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
276     if (l2_offset < 0) {
277         ret = l2_offset;
278         goto fail;
279     }
280 
281     /* If we're allocating the table at offset 0 then something is wrong */
282     if (l2_offset == 0) {
283         qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
284                                 "allocation of L2 table at offset 0");
285         ret = -EIO;
286         goto fail;
287     }
288 
289     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
290     if (ret < 0) {
291         goto fail;
292     }
293 
294     /* allocate a new entry in the l2 cache */
295 
296     trace_qcow2_l2_allocate_get_empty(bs, l1_index);
297     ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
298     if (ret < 0) {
299         goto fail;
300     }
301 
302     l2_table = *table;
303 
304     if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
305         /* if there was no old l2 table, clear the new table */
306         memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
307     } else {
308         uint64_t* old_table;
309 
310         /* if there was an old l2 table, read it from the disk */
311         BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
312         ret = qcow2_cache_get(bs, s->l2_table_cache,
313             old_l2_offset & L1E_OFFSET_MASK,
314             (void**) &old_table);
315         if (ret < 0) {
316             goto fail;
317         }
318 
319         memcpy(l2_table, old_table, s->cluster_size);
320 
321         qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
322     }
323 
324     /* write the l2 table to the file */
325     BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
326 
327     trace_qcow2_l2_allocate_write_l2(bs, l1_index);
328     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
329     ret = qcow2_cache_flush(bs, s->l2_table_cache);
330     if (ret < 0) {
331         goto fail;
332     }
333 
334     /* update the L1 entry */
335     trace_qcow2_l2_allocate_write_l1(bs, l1_index);
336     s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
337     ret = qcow2_write_l1_entry(bs, l1_index);
338     if (ret < 0) {
339         goto fail;
340     }
341 
342     *table = l2_table;
343     trace_qcow2_l2_allocate_done(bs, l1_index, 0);
344     return 0;
345 
346 fail:
347     trace_qcow2_l2_allocate_done(bs, l1_index, ret);
348     if (l2_table != NULL) {
349         qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
350     }
351     s->l1_table[l1_index] = old_l2_offset;
352     if (l2_offset > 0) {
353         qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
354                             QCOW2_DISCARD_ALWAYS);
355     }
356     return ret;
357 }
358 
359 /*
360  * Checks how many clusters in a given L2 table are contiguous in the image
361  * file. As soon as one of the flags in the bitmask stop_flags changes compared
362  * to the first cluster, the search is stopped and the cluster is not counted
363  * as contiguous. (This allows, for example, stopping at the first compressed
364  * cluster, which may require different handling.)
365  */
366 static int count_contiguous_clusters(int nb_clusters, int cluster_size,
367         uint64_t *l2_table, uint64_t stop_flags)
368 {
369     int i;
370     QCow2ClusterType first_cluster_type;
371     uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
372     uint64_t first_entry = be64_to_cpu(l2_table[0]);
373     uint64_t offset = first_entry & mask;
374 
375     if (!offset) {
376         return 0;
377     }
378 
379     /* must be allocated */
380     first_cluster_type = qcow2_get_cluster_type(first_entry);
381     assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
382            first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);
383 
384     for (i = 0; i < nb_clusters; i++) {
385         uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
386         if (offset + (uint64_t) i * cluster_size != l2_entry) {
387             break;
388         }
389     }
390 
391     return i;
392 }
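
/*
 * Example of the stop condition above: the mask is
 * stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED, so neighbouring
 * entries only count as contiguous when their host offsets differ by exactly
 * cluster_size and none of the watched flag bits change.  With
 * stop_flags == QCOW_OFLAG_ZERO and a 64 KiB cluster size, normal entries at
 * 0x50000, 0x60000 and 0x70000 with identical flags yield 3, while a
 * compressed or zero-flagged entry in between stops the count there.
 */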
393 
394 /*
395  * Checks how many consecutive unallocated clusters in a given L2
396  * table have the same cluster type.
397  */
398 static int count_contiguous_clusters_unallocated(int nb_clusters,
399                                                  uint64_t *l2_table,
400                                                  QCow2ClusterType wanted_type)
401 {
402     int i;
403 
404     assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
405            wanted_type == QCOW2_CLUSTER_UNALLOCATED);
406     for (i = 0; i < nb_clusters; i++) {
407         uint64_t entry = be64_to_cpu(l2_table[i]);
408         QCow2ClusterType type = qcow2_get_cluster_type(entry);
409 
410         if (type != wanted_type) {
411             break;
412         }
413     }
414 
415     return i;
416 }
417 
418 static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
419                                             uint64_t src_cluster_offset,
420                                             unsigned offset_in_cluster,
421                                             QEMUIOVector *qiov)
422 {
423     int ret;
424 
425     if (qiov->size == 0) {
426         return 0;
427     }
428 
429     BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
430 
431     if (!bs->drv) {
432         return -ENOMEDIUM;
433     }
434 
435     /* Call .bdrv_co_preadv() directly instead of using the public block-layer
436      * interface.  This avoids double I/O throttling and request tracking,
437      * which can lead to deadlock when block layer copy-on-read is enabled.
438      */
439     ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
440                                   qiov->size, qiov, 0);
441     if (ret < 0) {
442         return ret;
443     }
444 
445     return 0;
446 }
447 
448 static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
449                                                 uint64_t src_cluster_offset,
450                                                 uint64_t cluster_offset,
451                                                 unsigned offset_in_cluster,
452                                                 uint8_t *buffer,
453                                                 unsigned bytes)
454 {
455     if (bytes && bs->encrypted) {
456         BDRVQcow2State *s = bs->opaque;
457         int64_t offset = (s->crypt_physical_offset ?
458                           (cluster_offset + offset_in_cluster) :
459                           (src_cluster_offset + offset_in_cluster));
460         assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
461         assert((bytes & ~BDRV_SECTOR_MASK) == 0);
462         assert(s->crypto);
463         if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
464             return false;
465         }
466     }
467     return true;
468 }
469 
470 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
471                                              uint64_t cluster_offset,
472                                              unsigned offset_in_cluster,
473                                              QEMUIOVector *qiov)
474 {
475     int ret;
476 
477     if (qiov->size == 0) {
478         return 0;
479     }
480 
481     ret = qcow2_pre_write_overlap_check(bs, 0,
482             cluster_offset + offset_in_cluster, qiov->size);
483     if (ret < 0) {
484         return ret;
485     }
486 
487     BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
488     ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
489                           qiov->size, qiov, 0);
490     if (ret < 0) {
491         return ret;
492     }
493 
494     return 0;
495 }
496 
497 
498 /*
499  * get_cluster_offset
500  *
501  * For a given offset of the virtual disk, find the cluster type and offset in
502  * the qcow2 file. The offset is stored in *cluster_offset.
503  *
504  * On entry, *bytes is the maximum number of contiguous bytes starting at
505  * offset that we are interested in.
506  *
507  * On exit, *bytes is the number of bytes starting at offset that have the same
508  * cluster type and (if applicable) are stored contiguously in the image file.
509  * Compressed clusters are always returned one by one.
510  *
511  * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
512  * cases.
513  */
514 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
515                              unsigned int *bytes, uint64_t *cluster_offset)
516 {
517     BDRVQcow2State *s = bs->opaque;
518     unsigned int l2_index;
519     uint64_t l1_index, l2_offset, *l2_table;
520     int l1_bits, c;
521     unsigned int offset_in_cluster;
522     uint64_t bytes_available, bytes_needed, nb_clusters;
523     QCow2ClusterType type;
524     int ret;
525 
526     offset_in_cluster = offset_into_cluster(s, offset);
527     bytes_needed = (uint64_t) *bytes + offset_in_cluster;
528 
529     l1_bits = s->l2_bits + s->cluster_bits;
530 
531     /* compute how many bytes there are between the start of the cluster
532      * containing offset and the end of the l1 entry */
533     bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
534                     + offset_in_cluster;
535 
536     if (bytes_needed > bytes_available) {
537         bytes_needed = bytes_available;
538     }
539 
540     *cluster_offset = 0;
541 
542     /* seek to the l2 offset in the l1 table */
543 
544     l1_index = offset >> l1_bits;
545     if (l1_index >= s->l1_size) {
546         type = QCOW2_CLUSTER_UNALLOCATED;
547         goto out;
548     }
549 
550     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
551     if (!l2_offset) {
552         type = QCOW2_CLUSTER_UNALLOCATED;
553         goto out;
554     }
555 
556     if (offset_into_cluster(s, l2_offset)) {
557         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
558                                 " unaligned (L1 index: %#" PRIx64 ")",
559                                 l2_offset, l1_index);
560         return -EIO;
561     }
562 
563     /* load the l2 table in memory */
564 
565     ret = l2_load(bs, l2_offset, &l2_table);
566     if (ret < 0) {
567         return ret;
568     }
569 
570     /* find the cluster offset for the given disk offset */
571 
572     l2_index = offset_to_l2_index(s, offset);
573     *cluster_offset = be64_to_cpu(l2_table[l2_index]);
574 
575     nb_clusters = size_to_clusters(s, bytes_needed);
576     /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
577      * integers; the minimum cluster size is 512, so this assertion is always
578      * true */
579     assert(nb_clusters <= INT_MAX);
580 
581     type = qcow2_get_cluster_type(*cluster_offset);
582     if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
583                                 type == QCOW2_CLUSTER_ZERO_ALLOC)) {
584         qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
585                                 " in pre-v3 image (L2 offset: %#" PRIx64
586                                 ", L2 index: %#x)", l2_offset, l2_index);
587         ret = -EIO;
588         goto fail;
589     }
590     switch (type) {
591     case QCOW2_CLUSTER_COMPRESSED:
592         /* Compressed clusters can only be processed one by one */
593         c = 1;
594         *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
595         break;
596     case QCOW2_CLUSTER_ZERO_PLAIN:
597     case QCOW2_CLUSTER_UNALLOCATED:
598         /* how many empty clusters ? */
599         c = count_contiguous_clusters_unallocated(nb_clusters,
600                                                   &l2_table[l2_index], type);
601         *cluster_offset = 0;
602         break;
603     case QCOW2_CLUSTER_ZERO_ALLOC:
604     case QCOW2_CLUSTER_NORMAL:
605         /* how many allocated clusters ? */
606         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
607                                       &l2_table[l2_index], QCOW_OFLAG_ZERO);
608         *cluster_offset &= L2E_OFFSET_MASK;
609         if (offset_into_cluster(s, *cluster_offset)) {
610             qcow2_signal_corruption(bs, true, -1, -1,
611                                     "Cluster allocation offset %#"
612                                     PRIx64 " unaligned (L2 offset: %#" PRIx64
613                                     ", L2 index: %#x)", *cluster_offset,
614                                     l2_offset, l2_index);
615             ret = -EIO;
616             goto fail;
617         }
618         break;
619     default:
620         abort();
621     }
622 
623     qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
624 
625     bytes_available = (int64_t)c * s->cluster_size;
626 
627 out:
628     if (bytes_available > bytes_needed) {
629         bytes_available = bytes_needed;
630     }
631 
632     /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
633      * subtracting offset_in_cluster will therefore definitely yield something
634      * not exceeding UINT_MAX */
635     assert(bytes_available - offset_in_cluster <= UINT_MAX);
636     *bytes = bytes_available - offset_in_cluster;
637 
638     return type;
639 
640 fail:
641     qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
642     return ret;
643 }
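
/*
 * Minimal caller sketch for qcow2_get_cluster_offset(), assuming s->lock is
 * held and the range stays within the virtual disk, as in the real read path
 * in qcow2.c.  It shows the in/out meaning of *bytes and the returned
 * QCOW2_CLUSTER_* type.  The helper below is purely illustrative
 * (hypothetical name).
 */
static inline int example_map_range(BlockDriverState *bs, uint64_t offset,
                                    uint64_t size)
{
    while (size > 0) {
        unsigned int cur_bytes = MIN(size, INT_MAX);
        uint64_t host_offset;
        int type = qcow2_get_cluster_offset(bs, offset, &cur_bytes,
                                            &host_offset);
        if (type < 0) {
            return type;    /* -errno */
        }
        /* cur_bytes now covers the contiguous run of this cluster type;
         * host_offset is only meaningful for allocated cluster types. */
        offset += cur_bytes;
        size -= cur_bytes;
    }
    return 0;
}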
644 
645 /*
646  * get_cluster_table
647  *
648  * for a given disk offset, load (and allocate if needed)
649  * the l2 table.
650  *
651  * the l2 table offset in the qcow2 file and the cluster index
652  * in the l2 table are given to the caller.
653  *
654  * Returns 0 on success, -errno in failure case
655  */
656 static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
657                              uint64_t **new_l2_table,
658                              int *new_l2_index)
659 {
660     BDRVQcow2State *s = bs->opaque;
661     unsigned int l2_index;
662     uint64_t l1_index, l2_offset;
663     uint64_t *l2_table = NULL;
664     int ret;
665 
666     /* seek to the l2 offset in the l1 table */
667 
668     l1_index = offset >> (s->l2_bits + s->cluster_bits);
669     if (l1_index >= s->l1_size) {
670         ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
671         if (ret < 0) {
672             return ret;
673         }
674     }
675 
676     assert(l1_index < s->l1_size);
677     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
678     if (offset_into_cluster(s, l2_offset)) {
679         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
680                                 " unaligned (L1 index: %#" PRIx64 ")",
681                                 l2_offset, l1_index);
682         return -EIO;
683     }
684 
685     /* seek the l2 table of the given l2 offset */
686 
687     if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
688         /* load the l2 table in memory */
689         ret = l2_load(bs, l2_offset, &l2_table);
690         if (ret < 0) {
691             return ret;
692         }
693     } else {
694         /* First allocate a new L2 table (and do COW if needed) */
695         ret = l2_allocate(bs, l1_index, &l2_table);
696         if (ret < 0) {
697             return ret;
698         }
699 
700         /* Then decrease the refcount of the old table */
701         if (l2_offset) {
702             qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
703                                 QCOW2_DISCARD_OTHER);
704         }
705     }
706 
707     /* find the cluster offset for the given disk offset */
708 
709     l2_index = offset_to_l2_index(s, offset);
710 
711     *new_l2_table = l2_table;
712     *new_l2_index = l2_index;
713 
714     return 0;
715 }
716 
717 /*
718  * alloc_compressed_cluster_offset
719  *
720  * For a given offset in the disk image, return the cluster offset in the
721  * qcow2 file.
722  *
723  * If the offset is not found, allocate a new compressed cluster.
724  *
725  * Returns the cluster offset if successful;
726  * returns 0 otherwise.
727  *
728  */
729 
730 uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
731                                                uint64_t offset,
732                                                int compressed_size)
733 {
734     BDRVQcow2State *s = bs->opaque;
735     int l2_index, ret;
736     uint64_t *l2_table;
737     int64_t cluster_offset;
738     int nb_csectors;
739 
740     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
741     if (ret < 0) {
742         return 0;
743     }
744 
745     /* Compression can't overwrite anything. Fail if the cluster was already
746      * allocated. */
747     cluster_offset = be64_to_cpu(l2_table[l2_index]);
748     if (cluster_offset & L2E_OFFSET_MASK) {
749         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
750         return 0;
751     }
752 
753     cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
754     if (cluster_offset < 0) {
755         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
756         return 0;
757     }
758 
759     nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
760                   (cluster_offset >> 9);
761 
762     cluster_offset |= QCOW_OFLAG_COMPRESSED |
763                       ((uint64_t)nb_csectors << s->csize_shift);
764 
765     /* update L2 table */
766 
767     /* compressed clusters never have the copied flag */
768 
769     BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
770     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
771     l2_table[l2_index] = cpu_to_be64(cluster_offset);
772     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
773 
774     return cluster_offset;
775 }
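
/*
 * Worked example of the compressed entry encoding above: if
 * qcow2_alloc_bytes() returns host offset 0x51234 and compressed_size is
 * 3000, then nb_csectors = ((0x51234 + 2999) >> 9) - (0x51234 >> 9)
 * = 654 - 649 = 5, i.e. the data spills into five sectors beyond the one
 * containing the start offset.  The L2 entry combines QCOW_OFLAG_COMPRESSED,
 * the host offset and this sector count; qcow2_decompress_cluster() later
 * re-derives the byte range from the same fields.
 */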
776 
777 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
778 {
779     BDRVQcow2State *s = bs->opaque;
780     Qcow2COWRegion *start = &m->cow_start;
781     Qcow2COWRegion *end = &m->cow_end;
782     unsigned buffer_size;
783     unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
784     bool merge_reads;
785     uint8_t *start_buffer, *end_buffer;
786     QEMUIOVector qiov;
787     int ret;
788 
789     assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
790     assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
791     assert(start->offset + start->nb_bytes <= end->offset);
792     assert(!m->data_qiov || m->data_qiov->size == data_bytes);
793 
794     if (start->nb_bytes == 0 && end->nb_bytes == 0) {
795         return 0;
796     }
797 
798     /* If we have to read both the start and end COW regions and the
799      * middle region is not too large then perform just one read
800      * operation */
801     merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
802     if (merge_reads) {
803         buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
804     } else {
805         /* If we have to do two reads, add some padding in the middle
806          * if necessary to make sure that the end region is optimally
807          * aligned. */
808         size_t align = bdrv_opt_mem_align(bs);
809         assert(align > 0 && align <= UINT_MAX);
810         assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
811                UINT_MAX - end->nb_bytes);
812         buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
813     }
814 
815     /* Reserve a buffer large enough to store all the data that we're
816      * going to read */
817     start_buffer = qemu_try_blockalign(bs, buffer_size);
818     if (start_buffer == NULL) {
819         return -ENOMEM;
820     }
821     /* The part of the buffer where the end region is located */
822     end_buffer = start_buffer + buffer_size - end->nb_bytes;
823 
824     qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));
825 
826     qemu_co_mutex_unlock(&s->lock);
827     /* First we read the existing data from both COW regions. We
828      * either read the whole region in one go, or the start and end
829      * regions separately. */
830     if (merge_reads) {
831         qemu_iovec_add(&qiov, start_buffer, buffer_size);
832         ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
833     } else {
834         qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
835         ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
836         if (ret < 0) {
837             goto fail;
838         }
839 
840         qemu_iovec_reset(&qiov);
841         qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
842         ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
843     }
844     if (ret < 0) {
845         goto fail;
846     }
847 
848     /* Encrypt the data if necessary before writing it */
849     if (bs->encrypted) {
850         if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
851                                     start->offset, start_buffer,
852                                     start->nb_bytes) ||
853             !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
854                                     end->offset, end_buffer, end->nb_bytes)) {
855             ret = -EIO;
856             goto fail;
857         }
858     }
859 
860     /* And now we can write everything. If we have the guest data we
861      * can write everything in one single operation */
862     if (m->data_qiov) {
863         qemu_iovec_reset(&qiov);
864         if (start->nb_bytes) {
865             qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
866         }
867         qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
868         if (end->nb_bytes) {
869             qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
870         }
871         /* NOTE: we have a write_aio blkdebug event here followed by
872          * a cow_write one in do_perform_cow_write(), but there's only
873          * one single I/O operation */
874         BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
875         ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
876     } else {
877         /* If there's no guest data then write both COW regions separately */
878         qemu_iovec_reset(&qiov);
879         qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
880         ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
881         if (ret < 0) {
882             goto fail;
883         }
884 
885         qemu_iovec_reset(&qiov);
886         qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
887         ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
888     }
889 
890 fail:
891     qemu_co_mutex_lock(&s->lock);
892 
893     /*
894      * Before we update the L2 table to actually point to the new cluster, we
895      * need to be sure that the refcounts have been increased and COW was
896      * handled.
897      */
898     if (ret == 0) {
899         qcow2_cache_depends_on_flush(s->l2_table_cache);
900     }
901 
902     qemu_vfree(start_buffer);
903     qemu_iovec_destroy(&qiov);
904     return ret;
905 }
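
/*
 * Buffer layout used by perform_cow() above: when both COW regions must be
 * read and the guest data between them is at most 16384 bytes, a single read
 * of start->nb_bytes + data_bytes + end->nb_bytes fills the whole buffer.
 * Otherwise the start region is read at the front of the buffer and the end
 * region at its back (end_buffer = start_buffer + buffer_size -
 * end->nb_bytes), with padding in between so that the end region stays
 * aligned to bdrv_opt_mem_align(bs).
 */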
906 
907 int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
908 {
909     BDRVQcow2State *s = bs->opaque;
910     int i, j = 0, l2_index, ret;
911     uint64_t *old_cluster, *l2_table;
912     uint64_t cluster_offset = m->alloc_offset;
913 
914     trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
915     assert(m->nb_clusters > 0);
916 
917     old_cluster = g_try_new(uint64_t, m->nb_clusters);
918     if (old_cluster == NULL) {
919         ret = -ENOMEM;
920         goto err;
921     }
922 
923     /* copy content of unmodified sectors */
924     ret = perform_cow(bs, m);
925     if (ret < 0) {
926         goto err;
927     }
928 
929     /* Update L2 table. */
930     if (s->use_lazy_refcounts) {
931         qcow2_mark_dirty(bs);
932     }
933     if (qcow2_need_accurate_refcounts(s)) {
934         qcow2_cache_set_dependency(bs, s->l2_table_cache,
935                                    s->refcount_block_cache);
936     }
937 
938     ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
939     if (ret < 0) {
940         goto err;
941     }
942     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
943 
944     assert(l2_index + m->nb_clusters <= s->l2_size);
945     for (i = 0; i < m->nb_clusters; i++) {
946         /* If two concurrent writes happen to the same unallocated cluster,
947          * each write allocates a separate cluster and writes data concurrently.
948          * The first one to complete updates the L2 table with a pointer to its
949          * cluster; the second one has to do RMW (which is done above by
950          * perform_cow()), update the L2 table with its cluster pointer, and free
951          * the old cluster. This is what this loop does. */
952         if (l2_table[l2_index + i] != 0) {
953             old_cluster[j++] = l2_table[l2_index + i];
954         }
955 
956         l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
957                     (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
958     }
959 
960 
961     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
962 
963     /*
964      * If this was a COW, we need to decrease the refcount of the old cluster.
965      *
966      * Don't discard clusters that reach a refcount of 0 (e.g. compressed
967      * clusters), the next write will reuse them anyway.
968      */
969     if (!m->keep_old_clusters && j != 0) {
970         for (i = 0; i < j; i++) {
971             qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
972                                     QCOW2_DISCARD_NEVER);
973         }
974     }
975 
976     ret = 0;
977 err:
978     g_free(old_cluster);
979     return ret;
980 }
981 
982 /*
983  * Returns the number of contiguous clusters that can be used for an allocating
984  * write, but require COW to be performed (this includes yet unallocated space,
985  * which must be copied from the backing file)
986  */
987 static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
988     uint64_t *l2_table, int l2_index)
989 {
990     int i;
991 
992     for (i = 0; i < nb_clusters; i++) {
993         uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
994         QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
995 
996         switch(cluster_type) {
997         case QCOW2_CLUSTER_NORMAL:
998             if (l2_entry & QCOW_OFLAG_COPIED) {
999                 goto out;
1000             }
1001             break;
1002         case QCOW2_CLUSTER_UNALLOCATED:
1003         case QCOW2_CLUSTER_COMPRESSED:
1004         case QCOW2_CLUSTER_ZERO_PLAIN:
1005         case QCOW2_CLUSTER_ZERO_ALLOC:
1006             break;
1007         default:
1008             abort();
1009         }
1010     }
1011 
1012 out:
1013     assert(i <= nb_clusters);
1014     return i;
1015 }
1016 
1017 /*
1018  * Check if there already is an AIO write request in flight which allocates
1019  * the same cluster. In this case we need to wait until the previous
1020  * request has completed and updated the L2 table accordingly.
1021  *
1022  * Returns:
1023  *   0       if there was no dependency. *cur_bytes indicates the number of
1024  *           bytes from guest_offset that can be read before the next
1025  *           dependency must be processed (or the request is complete)
1026  *
1027  *   -EAGAIN if we had to wait for another request, previously gathered
1028  *           information on cluster allocation may be invalid now. The caller
1029  *           must start over anyway, so consider *cur_bytes undefined.
1030  */
1031 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
1032     uint64_t *cur_bytes, QCowL2Meta **m)
1033 {
1034     BDRVQcow2State *s = bs->opaque;
1035     QCowL2Meta *old_alloc;
1036     uint64_t bytes = *cur_bytes;
1037 
1038     QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
1039 
1040         uint64_t start = guest_offset;
1041         uint64_t end = start + bytes;
1042         uint64_t old_start = l2meta_cow_start(old_alloc);
1043         uint64_t old_end = l2meta_cow_end(old_alloc);
1044 
1045         if (end <= old_start || start >= old_end) {
1046             /* No intersection */
1047         } else {
1048             if (start < old_start) {
1049                 /* Stop at the start of a running allocation */
1050                 bytes = old_start - start;
1051             } else {
1052                 bytes = 0;
1053             }
1054 
1055             /* Stop if already an l2meta exists. After yielding, it wouldn't
1056              * be valid any more, so we'd have to clean up the old L2Metas
1057              * and deal with requests depending on them before starting to
1058              * gather new ones. Not worth the trouble. */
1059             if (bytes == 0 && *m) {
1060                 *cur_bytes = 0;
1061                 return 0;
1062             }
1063 
1064             if (bytes == 0) {
1065                 /* Wait for the dependency to complete. We need to recheck
1066                  * the free/allocated clusters when we continue. */
1067                 qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
1068                 return -EAGAIN;
1069             }
1070         }
1071     }
1072 
1073     /* Make sure that existing clusters and new allocations are only used up to
1074      * the next dependency if we shortened the request above */
1075     *cur_bytes = bytes;
1076 
1077     return 0;
1078 }
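
/*
 * Example of the shortening logic above: with an in-flight allocation whose
 * COW area covers [0x30000, 0x50000), a new request for [0x20000, 0x60000)
 * is cut down to bytes = 0x30000 - 0x20000 = 0x10000 so that it ends where
 * the running allocation starts.  A request starting at 0x40000 gets
 * bytes = 0 and either returns with *cur_bytes = 0 (if an l2meta has already
 * been gathered) or waits on dependent_requests and returns -EAGAIN.
 */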
1079 
1080 /*
1081  * Checks how many already allocated clusters there are at the given
1082  * guest_offset (up to *bytes) that don't require a copy on write. If
1083  * *host_offset is not zero, only physically contiguous clusters beginning at
1084  * this host offset are counted.
1085  *
1086  * Note that guest_offset may not be cluster aligned. In this case, the
1087  * returned *host_offset points to the exact byte referenced by guest_offset
1088  * and therefore isn't cluster aligned either.
1089  *
1090  * Returns:
1091  *   0:     if no allocated clusters are available at the given offset.
1092  *          *bytes is normally unchanged. It is set to 0 if the cluster
1093  *          is allocated and doesn't need COW, but doesn't have the right
1094  *          physical offset.
1095  *
1096  *   1:     if allocated clusters that don't require a COW are available at
1097  *          the requested offset. *bytes may have decreased and describes
1098  *          the length of the area that can be written to.
1099  *
1100  *  -errno: in error cases
1101  */
1102 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
1103     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1104 {
1105     BDRVQcow2State *s = bs->opaque;
1106     int l2_index;
1107     uint64_t cluster_offset;
1108     uint64_t *l2_table;
1109     uint64_t nb_clusters;
1110     unsigned int keep_clusters;
1111     int ret;
1112 
1113     trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
1114                               *bytes);
1115 
1116     assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
1117                                 == offset_into_cluster(s, *host_offset));
1118 
1119     /*
1120      * Calculate the number of clusters to look for. We stop at L2 table
1121      * boundaries to keep things simple.
1122      */
1123     nb_clusters =
1124         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1125 
1126     l2_index = offset_to_l2_index(s, guest_offset);
1127     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1128     assert(nb_clusters <= INT_MAX);
1129 
1130     /* Find L2 entry for the first involved cluster */
1131     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
1132     if (ret < 0) {
1133         return ret;
1134     }
1135 
1136     cluster_offset = be64_to_cpu(l2_table[l2_index]);
1137 
1138     /* Check how many clusters are already allocated and don't need COW */
1139     if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
1140         && (cluster_offset & QCOW_OFLAG_COPIED))
1141     {
1142         /* If a specific host_offset is required, check it */
1143         bool offset_matches =
1144             (cluster_offset & L2E_OFFSET_MASK) == *host_offset;
1145 
1146         if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
1147             qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
1148                                     "%#" PRIx64 " unaligned (guest offset: %#"
1149                                     PRIx64 ")", cluster_offset & L2E_OFFSET_MASK,
1150                                     guest_offset);
1151             ret = -EIO;
1152             goto out;
1153         }
1154 
1155         if (*host_offset != 0 && !offset_matches) {
1156             *bytes = 0;
1157             ret = 0;
1158             goto out;
1159         }
1160 
1161         /* We keep all QCOW_OFLAG_COPIED clusters */
1162         keep_clusters =
1163             count_contiguous_clusters(nb_clusters, s->cluster_size,
1164                                       &l2_table[l2_index],
1165                                       QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
1166         assert(keep_clusters <= nb_clusters);
1167 
1168         *bytes = MIN(*bytes,
1169                  keep_clusters * s->cluster_size
1170                  - offset_into_cluster(s, guest_offset));
1171 
1172         ret = 1;
1173     } else {
1174         ret = 0;
1175     }
1176 
1177     /* Cleanup */
1178 out:
1179     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1180 
1181     /* Only return a host offset if we actually made progress. Otherwise we
1182      * would make requirements for handle_alloc() that it can't fulfill */
1183     if (ret > 0) {
1184         *host_offset = (cluster_offset & L2E_OFFSET_MASK)
1185                      + offset_into_cluster(s, guest_offset);
1186     }
1187 
1188     return ret;
1189 }
1190 
1191 /*
1192  * Allocates new clusters for the given guest_offset.
1193  *
1194  * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
1195  * contain the number of clusters that have been allocated and are contiguous
1196  * in the image file.
1197  *
1198  * If *host_offset is non-zero, it specifies the offset in the image file at
1199  * which the new clusters must start. *nb_clusters can be 0 on return in this
1200  * case if the cluster at host_offset is already in use. If *host_offset is
1201  * zero, the clusters can be allocated anywhere in the image file.
1202  *
1203  * *host_offset is updated to contain the offset into the image file at which
1204  * the first allocated cluster starts.
1205  *
1206  * Return 0 on success and -errno in error cases. -EAGAIN means that the
1207  * function has been waiting for another request and the allocation must be
1208  * restarted, but the whole request should not be failed.
1209  */
1210 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
1211                                    uint64_t *host_offset, uint64_t *nb_clusters)
1212 {
1213     BDRVQcow2State *s = bs->opaque;
1214 
1215     trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
1216                                          *host_offset, *nb_clusters);
1217 
1218     /* Allocate new clusters */
1219     trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
1220     if (*host_offset == 0) {
1221         int64_t cluster_offset =
1222             qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
1223         if (cluster_offset < 0) {
1224             return cluster_offset;
1225         }
1226         *host_offset = cluster_offset;
1227         return 0;
1228     } else {
1229         int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
1230         if (ret < 0) {
1231             return ret;
1232         }
1233         *nb_clusters = ret;
1234         return 0;
1235     }
1236 }
1237 
1238 /*
1239  * Allocates new clusters for an area that either is yet unallocated or needs a
1240  * copy on write. If *host_offset is non-zero, clusters are only allocated if
1241  * the new allocation can match the specified host offset.
1242  *
1243  * Note that guest_offset may not be cluster aligned. In this case, the
1244  * returned *host_offset points to the exact byte referenced by guest_offset
1245  * and therefore isn't cluster aligned either.
1246  *
1247  * Returns:
1248  *   0:     if no clusters could be allocated. *bytes is set to 0,
1249  *          *host_offset is left unchanged.
1250  *
1251  *   1:     if new clusters were allocated. *bytes may be decreased if the
1252  *          new allocation doesn't cover all of the requested area.
1253  *          *host_offset is updated to contain the host offset of the first
1254  *          newly allocated cluster.
1255  *
1256  *  -errno: in error cases
1257  */
1258 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
1259     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1260 {
1261     BDRVQcow2State *s = bs->opaque;
1262     int l2_index;
1263     uint64_t *l2_table;
1264     uint64_t entry;
1265     uint64_t nb_clusters;
1266     int ret;
1267     bool keep_old_clusters = false;
1268 
1269     uint64_t alloc_cluster_offset = 0;
1270 
1271     trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
1272                              *bytes);
1273     assert(*bytes > 0);
1274 
1275     /*
1276      * Calculate the number of clusters to look for. We stop at L2 table
1277      * boundaries to keep things simple.
1278      */
1279     nb_clusters =
1280         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1281 
1282     l2_index = offset_to_l2_index(s, guest_offset);
1283     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1284     assert(nb_clusters <= INT_MAX);
1285 
1286     /* Find L2 entry for the first involved cluster */
1287     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
1288     if (ret < 0) {
1289         return ret;
1290     }
1291 
1292     entry = be64_to_cpu(l2_table[l2_index]);
1293 
1294     /* For the moment, overwrite compressed clusters one by one */
1295     if (entry & QCOW_OFLAG_COMPRESSED) {
1296         nb_clusters = 1;
1297     } else {
1298         nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
1299     }
1300 
1301     /* This function is only called when there were no non-COW clusters, so if
1302      * we can't find any unallocated or COW clusters either, something is
1303      * wrong with our code. */
1304     assert(nb_clusters > 0);
1305 
1306     if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
1307         (entry & QCOW_OFLAG_COPIED) &&
1308         (!*host_offset ||
1309          start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
1310     {
1311         int preallocated_nb_clusters;
1312 
1313         if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
1314             qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
1315                                     "cluster offset %#" PRIx64 " unaligned "
1316                                     "(guest offset: %#" PRIx64 ")",
1317                                     entry & L2E_OFFSET_MASK, guest_offset);
1318             ret = -EIO;
1319             goto fail;
1320         }
1321 
1322         /* Try to reuse preallocated zero clusters; contiguous normal clusters
1323  * would be fine, too, but count_cow_clusters() above has already limited
1324  * nb_clusters to a range of COW clusters */
1325         preallocated_nb_clusters =
1326             count_contiguous_clusters(nb_clusters, s->cluster_size,
1327                                       &l2_table[l2_index], QCOW_OFLAG_COPIED);
1328         assert(preallocated_nb_clusters > 0);
1329 
1330         nb_clusters = preallocated_nb_clusters;
1331         alloc_cluster_offset = entry & L2E_OFFSET_MASK;
1332 
1333         /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
1334          * should not free them. */
1335         keep_old_clusters = true;
1336     }
1337 
1338     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1339 
1340     if (!alloc_cluster_offset) {
1341         /* Allocate, if necessary at a given offset in the image file */
1342         alloc_cluster_offset = start_of_cluster(s, *host_offset);
1343         ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
1344                                       &nb_clusters);
1345         if (ret < 0) {
1346             goto fail;
1347         }
1348 
1349         /* Can't extend contiguous allocation */
1350         if (nb_clusters == 0) {
1351             *bytes = 0;
1352             return 0;
1353         }
1354 
1355         /* !*host_offset would overwrite the image header and is reserved for
1356          * "no host offset preferred". If 0 was a valid host offset, it'd
1357          * trigger the following overlap check; do that now to avoid having an
1358          * invalid value in *host_offset. */
1359         if (!alloc_cluster_offset) {
1360             ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
1361                                                 nb_clusters * s->cluster_size);
1362             assert(ret < 0);
1363             goto fail;
1364         }
1365     }
1366 
1367     /*
1368      * Save info needed for meta data update.
1369      *
1370      * requested_bytes: Number of bytes from the start of the first
1371      * newly allocated cluster to the end of the (possibly shortened
1372      * before) write request.
1373      *
1374      * avail_bytes: Number of bytes from the start of the first
1375      * newly allocated to the end of the last newly allocated cluster.
1376      *
1377      * nb_bytes: The number of bytes from the start of the first
1378      * newly allocated cluster to the end of the area that the write
1379      * request actually writes to (excluding COW at the end)
1380      */
1381     uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
1382     int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
1383     int nb_bytes = MIN(requested_bytes, avail_bytes);
1384     QCowL2Meta *old_m = *m;
1385 
1386     *m = g_malloc0(sizeof(**m));
1387 
1388     **m = (QCowL2Meta) {
1389         .next           = old_m,
1390 
1391         .alloc_offset   = alloc_cluster_offset,
1392         .offset         = start_of_cluster(s, guest_offset),
1393         .nb_clusters    = nb_clusters,
1394 
1395         .keep_old_clusters  = keep_old_clusters,
1396 
1397         .cow_start = {
1398             .offset     = 0,
1399             .nb_bytes   = offset_into_cluster(s, guest_offset),
1400         },
1401         .cow_end = {
1402             .offset     = nb_bytes,
1403             .nb_bytes   = avail_bytes - nb_bytes,
1404         },
1405     };
1406     qemu_co_queue_init(&(*m)->dependent_requests);
1407     QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
1408 
1409     *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
1410     *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
1411     assert(*bytes != 0);
1412 
1413     return 1;
1414 
1415 fail:
1416     if (*m && (*m)->nb_clusters > 0) {
1417         QLIST_REMOVE(*m, next_in_flight);
1418     }
1419     return ret;
1420 }
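
/*
 * Worked example of the QCowL2Meta filled in above, for a 64 KiB cluster
 * size and a 4096-byte write at guest offset 0x12345000 that allocates one
 * cluster: m->offset = 0x12340000, cow_start covers bytes [0, 0x5000) of the
 * cluster, the guest data covers [0x5000, 0x6000), cow_end covers
 * [0x6000, 0x10000), avail_bytes is 0x10000 and *bytes is trimmed to 0x1000.
 */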
1421 
1422 /*
1423  * alloc_cluster_offset
1424  *
1425  * For a given offset on the virtual disk, find the cluster offset in qcow2
1426  * file. If the offset is not found, allocate a new cluster.
1427  *
1428  * If the cluster was already allocated, m->nb_clusters is set to 0 and
1429  * other fields in m are meaningless.
1430  *
1431  * If the cluster is newly allocated, m->nb_clusters is set to the number of
1432  * contiguous clusters that have been allocated. In this case, the other
1433  * fields of m are valid and contain information about the first allocated
1434  * cluster.
1435  *
1436  * If the request conflicts with another write request in flight, the coroutine
1437  * is queued and will be reentered when the dependency has completed.
1438  *
1439  * Return 0 on success and -errno in error cases
1440  */
1441 int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
1442                                unsigned int *bytes, uint64_t *host_offset,
1443                                QCowL2Meta **m)
1444 {
1445     BDRVQcow2State *s = bs->opaque;
1446     uint64_t start, remaining;
1447     uint64_t cluster_offset;
1448     uint64_t cur_bytes;
1449     int ret;
1450 
1451     trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);
1452 
1453 again:
1454     start = offset;
1455     remaining = *bytes;
1456     cluster_offset = 0;
1457     *host_offset = 0;
1458     cur_bytes = 0;
1459     *m = NULL;
1460 
1461     while (true) {
1462 
1463         if (!*host_offset) {
1464             *host_offset = start_of_cluster(s, cluster_offset);
1465         }
1466 
1467         assert(remaining >= cur_bytes);
1468 
1469         start           += cur_bytes;
1470         remaining       -= cur_bytes;
1471         cluster_offset  += cur_bytes;
1472 
1473         if (remaining == 0) {
1474             break;
1475         }
1476 
1477         cur_bytes = remaining;
1478 
1479         /*
1480          * Now start gathering as many contiguous clusters as possible:
1481          *
1482          * 1. Check for overlaps with in-flight allocations
1483          *
1484          *      a) Overlap not in the first cluster -> shorten this request and
1485          *         let the caller handle the rest in its next loop iteration.
1486          *
1487          *      b) Real overlaps of two requests. Yield and restart the search
1488          *         for contiguous clusters (the situation could have changed
1489          *         while we were sleeping)
1490          *
1491          *      c) TODO: Request starts in the same cluster as the in-flight
1492          *         allocation ends. Shorten the COW of the in-flight allocation,
1493          *         set cluster_offset to write to the same cluster and set up
1494          *         the right synchronisation between the in-flight request and
1495          *         the new one.
1496          */
1497         ret = handle_dependencies(bs, start, &cur_bytes, m);
1498         if (ret == -EAGAIN) {
1499             /* Currently handle_dependencies() doesn't yield if we already had
1500              * an allocation. If it did, we would have to clean up the L2Meta
1501              * structs before starting over. */
1502             assert(*m == NULL);
1503             goto again;
1504         } else if (ret < 0) {
1505             return ret;
1506         } else if (cur_bytes == 0) {
1507             break;
1508         } else {
1509             /* handle_dependencies() may have decreased cur_bytes (shortened
1510              * the allocations below) so that the next dependency is processed
1511              * correctly during the next loop iteration. */
1512         }
1513 
1514         /*
1515          * 2. Count contiguous COPIED clusters.
1516          */
1517         ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
1518         if (ret < 0) {
1519             return ret;
1520         } else if (ret) {
1521             continue;
1522         } else if (cur_bytes == 0) {
1523             break;
1524         }
1525 
1526         /*
1527          * 3. If the request still hasn't completed, allocate new clusters,
1528          *    considering any cluster_offset of steps 1c or 2.
1529          */
1530         ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
1531         if (ret < 0) {
1532             return ret;
1533         } else if (ret) {
1534             continue;
1535         } else {
1536             assert(cur_bytes == 0);
1537             break;
1538         }
1539     }
1540 
1541     *bytes -= remaining;
1542     assert(*bytes > 0);
1543     assert(*host_offset != 0);
1544 
1545     return 0;
1546 }
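
/*
 * Illustration only (not part of the driver): a rough, simplified sketch of
 * how the write path in qcow2.c consumes the result of
 * qcow2_alloc_cluster_offset().  Variable names are hypothetical and error
 * handling is omitted:
 *
 *     int ret;
 *     unsigned int cur_bytes = bytes_left_in_request;
 *     uint64_t host_offset;
 *     QCowL2Meta *l2meta = NULL;
 *
 *     ret = qcow2_alloc_cluster_offset(bs, guest_offset, &cur_bytes,
 *                                      &host_offset, &l2meta);
 *
 *     ... write cur_bytes bytes of guest data to host_offset in bs->file ...
 *
 *     while (l2meta != NULL) {
 *         QCowL2Meta *next = l2meta->next;
 *
 *         ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
 *
 *         ... plus bookkeeping: remove the request from s->cluster_allocs
 *         and wake any coroutines queued on it ...
 *
 *         g_free(l2meta);
 *         l2meta = next;
 *     }
 */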
1547 
1548 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
1549                              const uint8_t *buf, int buf_size)
1550 {
1551     z_stream strm1, *strm = &strm1;
1552     int ret, out_len;
1553 
1554     memset(strm, 0, sizeof(*strm));
1555 
1556     strm->next_in = (uint8_t *)buf;
1557     strm->avail_in = buf_size;
1558     strm->next_out = out_buf;
1559     strm->avail_out = out_buf_size;
1560 
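    /* Negative windowBits selects a raw deflate stream (no zlib header or
     * checksum) with a 2^12-byte window, matching the deflateInit2()
     * parameters qcow2 uses when writing compressed clusters. */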
1561     ret = inflateInit2(strm, -12);
1562     if (ret != Z_OK)
1563         return -1;
1564     ret = inflate(strm, Z_FINISH);
1565     out_len = strm->next_out - out_buf;
1566     if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
1567         out_len != out_buf_size) {
1568         inflateEnd(strm);
1569         return -1;
1570     }
1571     inflateEnd(strm);
1572     return 0;
1573 }
1574 
1575 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
1576 {
1577     BDRVQcow2State *s = bs->opaque;
1578     int ret, csize, nb_csectors, sector_offset;
1579     uint64_t coffset;
1580 
1581     coffset = cluster_offset & s->cluster_offset_mask;
1582     if (s->cluster_cache_offset != coffset) {
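        /* The compressed cluster descriptor stores the host byte offset of
         * the compressed data in its low bits (cluster_offset_mask) and the
         * number of occupied 512-byte sectors, minus one, in the bits above
         * csize_shift.  The data may start in the middle of a sector, so
         * csize is the byte count from coffset to the end of the last
         * occupied sector. */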
1583         nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
1584         sector_offset = coffset & 511;
1585         csize = nb_csectors * 512 - sector_offset;
1586 
1587         /* Allocate the buffers on the first decompress operation; most
1588          * images are uncompressed, so the memory overhead can be avoided.
1589          * The buffers are freed in .bdrv_close().
1590          */
1591         if (!s->cluster_data) {
1592             /* one more sector for decompressed data alignment */
1593             /* one extra sector: the compressed data need not be sector-aligned */
1594                     QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
1595             if (!s->cluster_data) {
1596                 return -ENOMEM;
1597             }
1598         }
1599         if (!s->cluster_cache) {
1600             s->cluster_cache = g_malloc(s->cluster_size);
1601         }
1602 
1603         BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
1604         ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
1605                         nb_csectors);
1606         if (ret < 0) {
1607             return ret;
1608         }
1609         if (decompress_buffer(s->cluster_cache, s->cluster_size,
1610                               s->cluster_data + sector_offset, csize) < 0) {
1611             return -EIO;
1612         }
1613         s->cluster_cache_offset = coffset;
1614     }
1615     return 0;
1616 }
1617 
1618 /*
1619  * Discards as many of the given nb_clusters clusters as can be handled at
1620  * once (i.e. all of them that lie within the same L2 table) and returns the
1621  * number of discarded clusters.
1622  */
1623 static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
1624                              uint64_t nb_clusters, enum qcow2_discard_type type,
1625                              bool full_discard)
1626 {
1627     BDRVQcow2State *s = bs->opaque;
1628     uint64_t *l2_table;
1629     int l2_index;
1630     int ret;
1631     int i;
1632 
1633     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1634     if (ret < 0) {
1635         return ret;
1636     }
1637 
1638     /* Limit nb_clusters to one L2 table */
1639     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1640     assert(nb_clusters <= INT_MAX);
1641 
1642     for (i = 0; i < nb_clusters; i++) {
1643         uint64_t old_l2_entry;
1644 
1645         old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);
1646 
1647         /*
1648          * If full_discard is false, make sure that a discarded area reads back
1649          * as zeroes for v3 images (we cannot do it for v2 without actually
1650          * writing a zero-filled buffer). We can skip the operation if the
1651          * cluster is already marked as zero, or if it's unallocated and we
1652          * don't have a backing file.
1653          *
1654          * TODO We might want to use bdrv_block_status(bs) here, but we're
1655          * holding s->lock, so that doesn't work today.
1656          *
1657          * If full_discard is true, the discarded area should not read back
1658          * as zeroes, but rather fall through to the backing file.
1659          */
1660         switch (qcow2_get_cluster_type(old_l2_entry)) {
1661         case QCOW2_CLUSTER_UNALLOCATED:
1662             if (full_discard || !bs->backing) {
1663                 continue;
1664             }
1665             break;
1666 
1667         case QCOW2_CLUSTER_ZERO_PLAIN:
1668             if (!full_discard) {
1669                 continue;
1670             }
1671             break;
1672 
1673         case QCOW2_CLUSTER_ZERO_ALLOC:
1674         case QCOW2_CLUSTER_NORMAL:
1675         case QCOW2_CLUSTER_COMPRESSED:
1676             break;
1677 
1678         default:
1679             abort();
1680         }
1681 
1682         /* First remove L2 entries */
1683         qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1684         if (!full_discard && s->qcow_version >= 3) {
1685             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1686         } else {
1687             l2_table[l2_index + i] = cpu_to_be64(0);
1688         }
1689 
1690         /* Then decrease the refcount */
1691         qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
1692     }
1693 
1694     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1695 
1696     return nb_clusters;
1697 }
1698 
1699 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
1700                           uint64_t bytes, enum qcow2_discard_type type,
1701                           bool full_discard)
1702 {
1703     BDRVQcow2State *s = bs->opaque;
1704     uint64_t end_offset = offset + bytes;
1705     uint64_t nb_clusters;
1706     int64_t cleared;
1707     int ret;
1708 
1709     /* Caller must pass aligned values, except at image end */
1710     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1711     assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1712            end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1713 
1714     nb_clusters = size_to_clusters(s, bytes);
1715 
1716     s->cache_discards = true;
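    /* Any discards of host clusters freed below are queued up while
     * cache_discards is set and then either issued or dropped by
     * qcow2_process_discards() at the end of this function. */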
1717 
1718     /* Each L2 table is handled by its own loop iteration */
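    /* (For example, with the default 64 KiB clusters an L2 table holds
     * 65536 / 8 = 8192 entries and so covers 512 MiB of guest data; discards
     * crossing such a boundary need more than one iteration.) */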
1719     while (nb_clusters > 0) {
1720         cleared = discard_single_l2(bs, offset, nb_clusters, type,
1721                                     full_discard);
1722         if (cleared < 0) {
1723             ret = cleared;
1724             goto fail;
1725         }
1726 
1727         nb_clusters -= cleared;
1728         offset += (cleared * s->cluster_size);
1729     }
1730 
1731     ret = 0;
1732 fail:
1733     s->cache_discards = false;
1734     qcow2_process_discards(bs, ret);
1735 
1736     return ret;
1737 }
1738 
1739 /*
1740  * Zeroes as many of the given nb_clusters clusters as can be handled at
1741  * once (i.e. all of them that lie within the same L2 table) and returns the
1742  * number of zeroed clusters.
1743  */
1744 static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
1745                           uint64_t nb_clusters, int flags)
1746 {
1747     BDRVQcow2State *s = bs->opaque;
1748     uint64_t *l2_table;
1749     int l2_index;
1750     int ret;
1751     int i;
1752     bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);
1753 
1754     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1755     if (ret < 0) {
1756         return ret;
1757     }
1758 
1759     /* Limit nb_clusters to one L2 table */
1760     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1761     assert(nb_clusters <= INT_MAX);
1762 
1763     for (i = 0; i < nb_clusters; i++) {
1764         uint64_t old_offset;
1765         QCow2ClusterType cluster_type;
1766 
1767         old_offset = be64_to_cpu(l2_table[l2_index + i]);
1768 
1769         /*
1770          * Minimize L2 changes if the cluster already reads back as
1771          * zeroes with correct allocation.
1772          */
1773         cluster_type = qcow2_get_cluster_type(old_offset);
1774         if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
1775             (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
1776             continue;
1777         }
1778 
1779         qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1780         if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
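        /* Either replace the entry with a standalone zero cluster, freeing
         * the old allocation (required for compressed clusters, requested via
         * BDRV_REQ_MAY_UNMAP), or merely set the zero flag on the existing
         * allocation so that a later write can reuse the host cluster. */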
1781             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1782             qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
1783         } else {
1784             l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
1785         }
1786     }
1787 
1788     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1789 
1790     return nb_clusters;
1791 }
1792 
1793 int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
1794                           uint64_t bytes, int flags)
1795 {
1796     BDRVQcow2State *s = bs->opaque;
1797     uint64_t end_offset = offset + bytes;
1798     uint64_t nb_clusters;
1799     int64_t cleared;
1800     int ret;
1801 
1802     /* Caller must pass aligned values, except at image end */
1803     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1804     assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1805            end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1806 
1807     /* The zero flag is only supported by version 3 and newer */
1808     if (s->qcow_version < 3) {
1809         return -ENOTSUP;
1810     }
1811 
1812     /* Each L2 table is handled by its own loop iteration */
1813     nb_clusters = size_to_clusters(s, bytes);
1814 
1815     s->cache_discards = true;
1816 
1817     while (nb_clusters > 0) {
1818         cleared = zero_single_l2(bs, offset, nb_clusters, flags);
1819         if (cleared < 0) {
1820             ret = cleared;
1821             goto fail;
1822         }
1823 
1824         nb_clusters -= cleared;
1825         offset += (cleared * s->cluster_size);
1826     }
1827 
1828     ret = 0;
1829 fail:
1830     s->cache_discards = false;
1831     qcow2_process_discards(bs, ret);
1832 
1833     return ret;
1834 }
1835 
1836 /*
1837  * Expands all zero clusters in a specific L1 table (or deallocates them, for
1838  * non-backed non-pre-allocated zero clusters).
1839  *
1840  * l1_entries and *visited_l1_entries are used to keep track of progress for
1841  * status_cb(). l1_entries contains the total number of L1 entries and
1842  * *visited_l1_entries counts all visited L1 entries.
1843  */
1844 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
1845                                       int l1_size, int64_t *visited_l1_entries,
1846                                       int64_t l1_entries,
1847                                       BlockDriverAmendStatusCB *status_cb,
1848                                       void *cb_opaque)
1849 {
1850     BDRVQcow2State *s = bs->opaque;
1851     bool is_active_l1 = (l1_table == s->l1_table);
1852     uint64_t *l2_table = NULL;
1853     int ret;
1854     int i, j;
1855 
1856     if (!is_active_l1) {
1857         /* Inactive L2 tables are not in the L2 table cache, so we need a
1858          * local buffer to read them into from disk */
1859         l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
1860         if (l2_table == NULL) {
1861             return -ENOMEM;
1862         }
1863     }
1864 
1865     for (i = 0; i < l1_size; i++) {
1866         uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
1867         bool l2_dirty = false;
1868         uint64_t l2_refcount;
1869 
1870         if (!l2_offset) {
1871             /* unallocated */
1872             (*visited_l1_entries)++;
1873             if (status_cb) {
1874                 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
1875             }
1876             continue;
1877         }
1878 
1879         if (offset_into_cluster(s, l2_offset)) {
1880             qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
1881                                     PRIx64 " unaligned (L1 index: %#x)",
1882                                     l2_offset, i);
1883             ret = -EIO;
1884             goto fail;
1885         }
1886 
1887         if (is_active_l1) {
1888             /* get active L2 tables from cache */
1889             ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
1890                     (void **)&l2_table);
1891         } else {
1892             /* load inactive L2 tables from disk */
1893             ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
1894                             (void *)l2_table, s->cluster_sectors);
1895         }
1896         if (ret < 0) {
1897             goto fail;
1898         }
1899 
1900         ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
1901                                  &l2_refcount);
1902         if (ret < 0) {
1903             goto fail;
1904         }
1905 
1906         for (j = 0; j < s->l2_size; j++) {
1907             uint64_t l2_entry = be64_to_cpu(l2_table[j]);
1908             int64_t offset = l2_entry & L2E_OFFSET_MASK;
1909             QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
1910 
1911             if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
1912                 cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
1913                 continue;
1914             }
1915 
1916             if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1917                 if (!bs->backing) {
1918                     /* not backed; therefore we can simply deallocate the
1919                      * cluster */
1920                     l2_table[j] = 0;
1921                     l2_dirty = true;
1922                     continue;
1923                 }
1924 
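                /* The image is backed, so the entry must keep reading back as
                 * zeroes once its zero flag is dropped: allocate a real host
                 * cluster and fill it with explicit zeroes below. */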
1925                 offset = qcow2_alloc_clusters(bs, s->cluster_size);
1926                 if (offset < 0) {
1927                     ret = offset;
1928                     goto fail;
1929                 }
1930 
1931                 if (l2_refcount > 1) {
1932                     /* For shared L2 tables, set the refcount accordingly (it is
1933                      * already 1 and needs to be l2_refcount) */
1934                     ret = qcow2_update_cluster_refcount(bs,
1935                             offset >> s->cluster_bits,
1936                             refcount_diff(1, l2_refcount), false,
1937                             QCOW2_DISCARD_OTHER);
1938                     if (ret < 0) {
1939                         qcow2_free_clusters(bs, offset, s->cluster_size,
1940                                             QCOW2_DISCARD_OTHER);
1941                         goto fail;
1942                     }
1943                 }
1944             }
1945 
1946             if (offset_into_cluster(s, offset)) {
1947                 qcow2_signal_corruption(bs, true, -1, -1,
1948                                         "Cluster allocation offset "
1949                                         "%#" PRIx64 " unaligned (L2 offset: %#"
1950                                         PRIx64 ", L2 index: %#x)", offset,
1951                                         l2_offset, j);
1952                 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1953                     qcow2_free_clusters(bs, offset, s->cluster_size,
1954                                         QCOW2_DISCARD_ALWAYS);
1955                 }
1956                 ret = -EIO;
1957                 goto fail;
1958             }
1959 
1960             ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
1961             if (ret < 0) {
1962                 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1963                     qcow2_free_clusters(bs, offset, s->cluster_size,
1964                                         QCOW2_DISCARD_ALWAYS);
1965                 }
1966                 goto fail;
1967             }
1968 
1969             ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
1970             if (ret < 0) {
1971                 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
1972                     qcow2_free_clusters(bs, offset, s->cluster_size,
1973                                         QCOW2_DISCARD_ALWAYS);
1974                 }
1975                 goto fail;
1976             }
1977 
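            /* QCOW_OFLAG_COPIED may only be set if the cluster's refcount is
             * exactly 1, i.e. if it may be overwritten in place. */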
1978             if (l2_refcount == 1) {
1979                 l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
1980             } else {
1981                 l2_table[j] = cpu_to_be64(offset);
1982             }
1983             l2_dirty = true;
1984         }
1985 
1986         if (is_active_l1) {
1987             if (l2_dirty) {
1988                 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1989                 qcow2_cache_depends_on_flush(s->l2_table_cache);
1990             }
1991             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1992         } else {
1993             if (l2_dirty) {
1994                 ret = qcow2_pre_write_overlap_check(bs,
1995                         QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
1996                         s->cluster_size);
1997                 if (ret < 0) {
1998                     goto fail;
1999                 }
2000 
2001                 ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
2002                                  (void *)l2_table, s->cluster_sectors);
2003                 if (ret < 0) {
2004                     goto fail;
2005                 }
2006             }
2007         }
2008 
2009         (*visited_l1_entries)++;
2010         if (status_cb) {
2011             status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
2012         }
2013     }
2014 
2015     ret = 0;
2016 
2017 fail:
2018     if (l2_table) {
2019         if (!is_active_l1) {
2020             qemu_vfree(l2_table);
2021         } else {
2022             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
2023         }
2024     }
2025     return ret;
2026 }
2027 
2028 /*
2029  * For backed images, expands all zero clusters in the image. For non-backed
2030  * images, deallocates all non-pre-allocated zero clusters (and claims the
2031  * allocation for pre-allocated ones). This is important for downgrading to a
2032  * qcow2 version which doesn't yet support metadata zero clusters.
2033  */
2034 int qcow2_expand_zero_clusters(BlockDriverState *bs,
2035                                BlockDriverAmendStatusCB *status_cb,
2036                                void *cb_opaque)
2037 {
2038     BDRVQcow2State *s = bs->opaque;
2039     uint64_t *l1_table = NULL;
2040     int64_t l1_entries = 0, visited_l1_entries = 0;
2041     int ret;
2042     int i, j;
2043 
2044     if (status_cb) {
2045         l1_entries = s->l1_size;
2046         for (i = 0; i < s->nb_snapshots; i++) {
2047             l1_entries += s->snapshots[i].l1_size;
2048         }
2049     }
2050 
2051     ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
2052                                      &visited_l1_entries, l1_entries,
2053                                      status_cb, cb_opaque);
2054     if (ret < 0) {
2055         goto fail;
2056     }
2057 
2058     /* Inactive L1 tables may point to active L2 tables - therefore it is
2059      * necessary to flush the L2 table cache before trying to access the L2
2060      * tables pointed to by inactive L1 entries (else we might try to expand
2061      * zero clusters that have already been expanded); furthermore, it is also
2062      * necessary to empty the L2 table cache, since it may contain tables which
2063      * are now going to be modified directly on disk, bypassing the cache.
2064      * qcow2_cache_empty() does both for us. */
2065     ret = qcow2_cache_empty(bs, s->l2_table_cache);
2066     if (ret < 0) {
2067         goto fail;
2068     }
2069 
2070     for (i = 0; i < s->nb_snapshots; i++) {
2071         int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
2072                                       sizeof(uint64_t), BDRV_SECTOR_SIZE);
2073 
2074         l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);
2075 
2076         ret = bdrv_read(bs->file,
2077                         s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
2078                         (void *)l1_table, l1_sectors);
2079         if (ret < 0) {
2080             goto fail;
2081         }
2082 
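        /* Snapshot L1 tables are stored big-endian on disk; convert them in
         * place before passing the table to expand_zero_clusters_in_l1(). */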
2083         for (j = 0; j < s->snapshots[i].l1_size; j++) {
2084             be64_to_cpus(&l1_table[j]);
2085         }
2086 
2087         ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
2088                                          &visited_l1_entries, l1_entries,
2089                                          status_cb, cb_opaque);
2090         if (ret < 0) {
2091             goto fail;
2092         }
2093     }
2094 
2095     ret = 0;
2096 
2097 fail:
2098     g_free(l1_table);
2099     return ret;
2100 }
2101