xref: /openbmc/qemu/block/qcow2-cluster.c (revision 56411125)
1 /*
2  * Block driver for the QCOW version 2 format
3  *
4  * Copyright (c) 2004-2006 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include <zlib.h>
26 
27 #include "qemu-common.h"
28 #include "block/block_int.h"
29 #include "block/qcow2.h"
30 #include "trace.h"
31 
32 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
33                         bool exact_size)
34 {
35     BDRVQcow2State *s = bs->opaque;
36     int new_l1_size2, ret, i;
37     uint64_t *new_l1_table;
38     int64_t old_l1_table_offset, old_l1_size;
39     int64_t new_l1_table_offset, new_l1_size;
40     uint8_t data[12];
41 
42     if (min_size <= s->l1_size)
43         return 0;
44 
45     /* Do a sanity check on min_size before trying to calculate new_l1_size
46      * (this prevents overflows during the while loop for the calculation of
47      * new_l1_size) */
48     if (min_size > INT_MAX / sizeof(uint64_t)) {
49         return -EFBIG;
50     }
51 
52     if (exact_size) {
53         new_l1_size = min_size;
54     } else {
55         /* Bump size up to reduce the number of times we have to grow */
56         new_l1_size = s->l1_size;
57         if (new_l1_size == 0) {
58             new_l1_size = 1;
59         }
60         while (min_size > new_l1_size) {
61             new_l1_size = (new_l1_size * 3 + 1) / 2;
62         }
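           /* For example, starting from an L1 size of 1, the loop above
            * yields 1, 2, 3, 5, 8, 12, 18, ..., i.e. roughly a 1.5x increase
            * per step. */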
63     }
64 
65     if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
66         return -EFBIG;
67     }
68 
69 #ifdef DEBUG_ALLOC2
70     fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
71             s->l1_size, new_l1_size);
72 #endif
73 
74     new_l1_size2 = sizeof(uint64_t) * new_l1_size;
75     new_l1_table = qemu_try_blockalign(bs->file->bs,
76                                        align_offset(new_l1_size2, 512));
77     if (new_l1_table == NULL) {
78         return -ENOMEM;
79     }
80     memset(new_l1_table, 0, align_offset(new_l1_size2, 512));
81 
82     memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
83 
84     /* write new table (align to cluster) */
85     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
86     new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
87     if (new_l1_table_offset < 0) {
88         qemu_vfree(new_l1_table);
89         return new_l1_table_offset;
90     }
91 
92     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
93     if (ret < 0) {
94         goto fail;
95     }
96 
97     /* the L1 position has not yet been updated, so these clusters must
98      * indeed be completely free */
99     ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
100                                         new_l1_size2);
101     if (ret < 0) {
102         goto fail;
103     }
104 
105     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
106     for (i = 0; i < s->l1_size; i++)
107         new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
108     ret = bdrv_pwrite_sync(bs->file->bs, new_l1_table_offset,
109                            new_l1_table, new_l1_size2);
110     if (ret < 0)
111         goto fail;
112     for (i = 0; i < s->l1_size; i++)
113         new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
114 
115     /* set new table */
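        /* The 12-byte buffer written below covers the adjacent QCowHeader
         * fields l1_size (4 bytes) and l1_table_offset (8 bytes), both big
         * endian, so a single synchronous write updates them together. */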
116     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
117     cpu_to_be32w((uint32_t*)data, new_l1_size);
118     stq_be_p(data + 4, new_l1_table_offset);
119     ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, l1_size),
120                            data, sizeof(data));
121     if (ret < 0) {
122         goto fail;
123     }
124     qemu_vfree(s->l1_table);
125     old_l1_table_offset = s->l1_table_offset;
126     s->l1_table_offset = new_l1_table_offset;
127     s->l1_table = new_l1_table;
128     old_l1_size = s->l1_size;
129     s->l1_size = new_l1_size;
130     qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
131                         QCOW2_DISCARD_OTHER);
132     return 0;
133  fail:
134     qemu_vfree(new_l1_table);
135     qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
136                         QCOW2_DISCARD_OTHER);
137     return ret;
138 }
139 
140 /*
141  * l2_load
142  *
143  * Loads an L2 table into memory. If the table is in the cache, the cache
144  * is used; otherwise the L2 table is loaded from the image file.
145  *
146  * Returns 0 on success, -errno on failure. On success, *l2_table points to
147  * the loaded table in the L2 table cache.
148  */
149 
150 static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
151     uint64_t **l2_table)
152 {
153     BDRVQcow2State *s = bs->opaque;
154     int ret;
155 
156     ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);
157 
158     return ret;
159 }
160 
161 /*
162  * Writes one sector of the L1 table to the disk (we can't update single
163  * entries and we really don't want bdrv_pwrite to perform a read-modify-write)
164  */
165 #define L1_ENTRIES_PER_SECTOR (512 / 8)
166 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
167 {
168     BDRVQcow2State *s = bs->opaque;
169     uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
170     int l1_start_index;
171     int i, ret;
172 
173     l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
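        /* With 512-byte sectors and 8-byte entries there are 64 L1 entries per
         * sector; e.g. l1_index 70 is rounded down to l1_start_index 64 so that
         * the whole containing sector can be rewritten below. */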
174     for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
175          i++)
176     {
177         buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
178     }
179 
180     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
181             s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
182     if (ret < 0) {
183         return ret;
184     }
185 
186     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
187     ret = bdrv_pwrite_sync(bs->file->bs,
188                            s->l1_table_offset + 8 * l1_start_index,
189                            buf, sizeof(buf));
190     if (ret < 0) {
191         return ret;
192     }
193 
194     return 0;
195 }
196 
197 /*
198  * l2_allocate
199  *
200  * Allocate a new L2 table in the file. If the L1 entry at l1_index already
201  * points to an L2 table (i.e. we are doing a copy on write for the L2
202  * table), copy the contents of the old L2 table into the newly allocated
203  * one. Otherwise the new table is initialized with zeros.
204  *
205  */
206 
207 static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
208 {
209     BDRVQcow2State *s = bs->opaque;
210     uint64_t old_l2_offset;
211     uint64_t *l2_table = NULL;
212     int64_t l2_offset;
213     int ret;
214 
215     old_l2_offset = s->l1_table[l1_index];
216 
217     trace_qcow2_l2_allocate(bs, l1_index);
218 
219     /* allocate a new l2 table */
220 
221     l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
222     if (l2_offset < 0) {
223         ret = l2_offset;
224         goto fail;
225     }
226 
227     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
228     if (ret < 0) {
229         goto fail;
230     }
231 
232     /* allocate a new entry in the l2 cache */
233 
234     trace_qcow2_l2_allocate_get_empty(bs, l1_index);
235     ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
236     if (ret < 0) {
237         goto fail;
238     }
239 
240     l2_table = *table;
241 
242     if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
243         /* if there was no old l2 table, clear the new table */
244         memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
245     } else {
246         uint64_t* old_table;
247 
248         /* if there was an old l2 table, read it from the disk */
249         BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
250         ret = qcow2_cache_get(bs, s->l2_table_cache,
251             old_l2_offset & L1E_OFFSET_MASK,
252             (void**) &old_table);
253         if (ret < 0) {
254             goto fail;
255         }
256 
257         memcpy(l2_table, old_table, s->cluster_size);
258 
259         qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
260     }
261 
262     /* write the l2 table to the file */
263     BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
264 
265     trace_qcow2_l2_allocate_write_l2(bs, l1_index);
266     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
267     ret = qcow2_cache_flush(bs, s->l2_table_cache);
268     if (ret < 0) {
269         goto fail;
270     }
271 
272     /* update the L1 entry */
273     trace_qcow2_l2_allocate_write_l1(bs, l1_index);
274     s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
275     ret = qcow2_write_l1_entry(bs, l1_index);
276     if (ret < 0) {
277         goto fail;
278     }
279 
280     *table = l2_table;
281     trace_qcow2_l2_allocate_done(bs, l1_index, 0);
282     return 0;
283 
284 fail:
285     trace_qcow2_l2_allocate_done(bs, l1_index, ret);
286     if (l2_table != NULL) {
287         qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
288     }
289     s->l1_table[l1_index] = old_l2_offset;
290     if (l2_offset > 0) {
291         qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
292                             QCOW2_DISCARD_ALWAYS);
293     }
294     return ret;
295 }
296 
297 /*
298  * Checks how many clusters in a given L2 table are contiguous in the image
299  * file. As soon as one of the flags in the bitmask stop_flags changes compared
300  * to the first cluster, the search is stopped and the cluster is not counted
301  * as contiguous. (This allows it, for example, to stop at the first compressed
302  * cluster, which may require different handling.)
303  */
304 static int count_contiguous_clusters(int nb_clusters, int cluster_size,
305         uint64_t *l2_table, uint64_t stop_flags)
306 {
307     int i;
308     uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
309     uint64_t first_entry = be64_to_cpu(l2_table[0]);
310     uint64_t offset = first_entry & mask;
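        /* Example: with 64 KiB clusters and stop_flags == QCOW_OFLAG_ZERO,
         * entries 0x5ab0000 and 0x5ac0000 with identical flag bits count as
         * contiguous, while a compressed or zero-flagged entry ends the run. */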
311 
312     if (!offset)
313         return 0;
314 
315     assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);
316 
317     for (i = 0; i < nb_clusters; i++) {
318         uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
319         if (offset + (uint64_t) i * cluster_size != l2_entry) {
320             break;
321         }
322     }
323 
324     return i;
325 }
326 
327 static int count_contiguous_free_clusters(int nb_clusters, uint64_t *l2_table)
328 {
329     int i;
330 
331     for (i = 0; i < nb_clusters; i++) {
332         int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));
333 
334         if (type != QCOW2_CLUSTER_UNALLOCATED) {
335             break;
336         }
337     }
338 
339     return i;
340 }
341 
342 /* The crypt function is compatible with the Linux cryptoloop
343    algorithm for < 4 GB images. NOTE: out_buf == in_buf is
344    supported */
345 int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
346                           uint8_t *out_buf, const uint8_t *in_buf,
347                           int nb_sectors, bool enc,
348                           Error **errp)
349 {
350     union {
351         uint64_t ll[2];
352         uint8_t b[16];
353     } ivec;
354     int i;
355     int ret;
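        /* The IV used below is simply the 64-bit guest sector number in
         * little-endian byte order, zero-padded to the 16-byte cipher block. */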
356 
357     for (i = 0; i < nb_sectors; i++) {
358         ivec.ll[0] = cpu_to_le64(sector_num);
359         ivec.ll[1] = 0;
360         if (qcrypto_cipher_setiv(s->cipher,
361                                  ivec.b, G_N_ELEMENTS(ivec.b),
362                                  errp) < 0) {
363             return -1;
364         }
365         if (enc) {
366             ret = qcrypto_cipher_encrypt(s->cipher,
367                                          in_buf,
368                                          out_buf,
369                                          512,
370                                          errp);
371         } else {
372             ret = qcrypto_cipher_decrypt(s->cipher,
373                                          in_buf,
374                                          out_buf,
375                                          512,
376                                          errp);
377         }
378         if (ret < 0) {
379             return -1;
380         }
381         sector_num++;
382         in_buf += 512;
383         out_buf += 512;
384     }
385     return 0;
386 }
387 
388 static int coroutine_fn copy_sectors(BlockDriverState *bs,
389                                      uint64_t start_sect,
390                                      uint64_t cluster_offset,
391                                      int n_start, int n_end)
392 {
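        /* All sector counts here are in 512-byte units; n_start and n_end are
         * sector offsets relative to the start of the newly allocated area at
         * cluster_offset. */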
393     BDRVQcow2State *s = bs->opaque;
394     QEMUIOVector qiov;
395     struct iovec iov;
396     int n, ret;
397 
398     n = n_end - n_start;
399     if (n <= 0) {
400         return 0;
401     }
402 
403     iov.iov_len = n * BDRV_SECTOR_SIZE;
404     iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
405     if (iov.iov_base == NULL) {
406         return -ENOMEM;
407     }
408 
409     qemu_iovec_init_external(&qiov, &iov, 1);
410 
411     BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
412 
413     if (!bs->drv) {
414         ret = -ENOMEDIUM;
415         goto out;
416     }
417 
418     /* Call .bdrv_co_readv() directly instead of using the public block-layer
419      * interface.  This avoids double I/O throttling and request tracking,
420      * which can lead to deadlock when block layer copy-on-read is enabled.
421      */
422     ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
423     if (ret < 0) {
424         goto out;
425     }
426 
427     if (bs->encrypted) {
428         Error *err = NULL;
429         assert(s->cipher);
430         if (qcow2_encrypt_sectors(s, start_sect + n_start,
431                                   iov.iov_base, iov.iov_base, n,
432                                   true, &err) < 0) {
433             ret = -EIO;
434             error_free(err);
435             goto out;
436         }
437     }
438 
439     ret = qcow2_pre_write_overlap_check(bs, 0,
440             cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
441     if (ret < 0) {
442         goto out;
443     }
444 
445     BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
446     ret = bdrv_co_writev(bs->file->bs, (cluster_offset >> 9) + n_start, n,
447                          &qiov);
448     if (ret < 0) {
449         goto out;
450     }
451 
452     ret = 0;
453 out:
454     qemu_vfree(iov.iov_base);
455     return ret;
456 }
457 
458 
459 /*
460  * get_cluster_offset
461  *
462  * For a given offset of the disk image, find the cluster offset in
463  * the qcow2 file. The offset is stored in *cluster_offset.
464  *
465  * on entry, *num is the number of contiguous sectors we'd like to
466  * access following offset.
467  *
468  * on exit, *num is the number of contiguous sectors we can read.
469  *
470  * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
471  * cases.
472  */
473 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
474     int *num, uint64_t *cluster_offset)
475 {
476     BDRVQcow2State *s = bs->opaque;
477     unsigned int l2_index;
478     uint64_t l1_index, l2_offset, *l2_table;
479     int l1_bits, c;
480     unsigned int index_in_cluster, nb_clusters;
481     uint64_t nb_available, nb_needed;
482     int ret;
483 
484     index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
485     nb_needed = *num + index_in_cluster;
486 
487     l1_bits = s->l2_bits + s->cluster_bits;
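        /* Example: with 64 KiB clusters (cluster_bits = 16) an L2 table holds
         * 8192 entries (l2_bits = 13), so each L1 entry covers 2^29 bytes and
         * l1_bits is 29. */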
488 
489     /* compute how many bytes there are between the offset and
490      * the end of the area covered by this l1 entry
491      */
492 
493     nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));
494 
495     /* compute the number of available sectors */
496 
497     nb_available = (nb_available >> 9) + index_in_cluster;
498 
499     if (nb_needed > nb_available) {
500         nb_needed = nb_available;
501     }
502     assert(nb_needed <= INT_MAX);
503 
504     *cluster_offset = 0;
505 
506     /* seek to the l2 offset in the l1 table */
507 
508     l1_index = offset >> l1_bits;
509     if (l1_index >= s->l1_size) {
510         ret = QCOW2_CLUSTER_UNALLOCATED;
511         goto out;
512     }
513 
514     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
515     if (!l2_offset) {
516         ret = QCOW2_CLUSTER_UNALLOCATED;
517         goto out;
518     }
519 
520     if (offset_into_cluster(s, l2_offset)) {
521         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
522                                 " unaligned (L1 index: %#" PRIx64 ")",
523                                 l2_offset, l1_index);
524         return -EIO;
525     }
526 
527     /* load the l2 table in memory */
528 
529     ret = l2_load(bs, l2_offset, &l2_table);
530     if (ret < 0) {
531         return ret;
532     }
533 
534     /* find the cluster offset for the given disk offset */
535 
536     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
537     *cluster_offset = be64_to_cpu(l2_table[l2_index]);
538 
539     /* nb_needed <= INT_MAX, thus nb_clusters <= INT_MAX, too */
540     nb_clusters = size_to_clusters(s, nb_needed << 9);
541 
542     ret = qcow2_get_cluster_type(*cluster_offset);
543     switch (ret) {
544     case QCOW2_CLUSTER_COMPRESSED:
545         /* Compressed clusters can only be processed one by one */
546         c = 1;
547         *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
548         break;
549     case QCOW2_CLUSTER_ZERO:
550         if (s->qcow_version < 3) {
551             qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
552                                     " in pre-v3 image (L2 offset: %#" PRIx64
553                                     ", L2 index: %#x)", l2_offset, l2_index);
554             ret = -EIO;
555             goto fail;
556         }
557         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
558                 &l2_table[l2_index], QCOW_OFLAG_ZERO);
559         *cluster_offset = 0;
560         break;
561     case QCOW2_CLUSTER_UNALLOCATED:
562         /* how many empty clusters? */
563         c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
564         *cluster_offset = 0;
565         break;
566     case QCOW2_CLUSTER_NORMAL:
567         /* how many allocated clusters? */
568         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
569                 &l2_table[l2_index], QCOW_OFLAG_ZERO);
570         *cluster_offset &= L2E_OFFSET_MASK;
571         if (offset_into_cluster(s, *cluster_offset)) {
572             qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
573                                     PRIx64 " unaligned (L2 offset: %#" PRIx64
574                                     ", L2 index: %#x)", *cluster_offset,
575                                     l2_offset, l2_index);
576             ret = -EIO;
577             goto fail;
578         }
579         break;
580     default:
581         abort();
582     }
583 
584     qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
585 
586     nb_available = (c * s->cluster_sectors);
587 
588 out:
589     if (nb_available > nb_needed)
590         nb_available = nb_needed;
591 
592     *num = nb_available - index_in_cluster;
593 
594     return ret;
595 
596 fail:
597     qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
598     return ret;
599 }
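    /*
     * A minimal caller sketch (hypothetical, not part of the driver) showing
     * how the function above is meant to be used; real callers hold s->lock
     * while calling it and add the offset within the cluster themselves.
     */
    #if 0
    static int example_map_sectors(BlockDriverState *bs, uint64_t offset,
                                   int *nb_sectors, uint64_t *host_cluster_offset)
    {
        /* On entry *nb_sectors is the desired length; on return it is clamped
         * to the contiguous run found, and the return value is a
         * QCOW2_CLUSTER_* type or -errno. */
        return qcow2_get_cluster_offset(bs, offset, nb_sectors,
                                        host_cluster_offset);
    }
    #endif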
600 
601 /*
602  * get_cluster_table
603  *
604  * for a given disk offset, load (and allocate if needed)
605  * the l2 table.
606  *
607  * the l2 table offset in the qcow2 file and the cluster index
608  * in the l2 table are given to the caller.
609  *
610  * Returns 0 on success, -errno on failure
611  */
612 static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
613                              uint64_t **new_l2_table,
614                              int *new_l2_index)
615 {
616     BDRVQcow2State *s = bs->opaque;
617     unsigned int l2_index;
618     uint64_t l1_index, l2_offset;
619     uint64_t *l2_table = NULL;
620     int ret;
621 
622     /* seek to the l2 offset in the l1 table */
623 
624     l1_index = offset >> (s->l2_bits + s->cluster_bits);
625     if (l1_index >= s->l1_size) {
626         ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
627         if (ret < 0) {
628             return ret;
629         }
630     }
631 
632     assert(l1_index < s->l1_size);
633     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
634     if (offset_into_cluster(s, l2_offset)) {
635         qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
636                                 " unaligned (L1 index: %#" PRIx64 ")",
637                                 l2_offset, l1_index);
638         return -EIO;
639     }
640 
641     /* seek the l2 table of the given l2 offset */
642 
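        /* QCOW_OFLAG_COPIED in the L1 entry means the L2 table has a refcount
         * of exactly one, so it may be modified in place; otherwise
         * l2_allocate() below performs a copy on write. */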
643     if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
644         /* load the l2 table in memory */
645         ret = l2_load(bs, l2_offset, &l2_table);
646         if (ret < 0) {
647             return ret;
648         }
649     } else {
650         /* First allocate a new L2 table (and do COW if needed) */
651         ret = l2_allocate(bs, l1_index, &l2_table);
652         if (ret < 0) {
653             return ret;
654         }
655 
656         /* Then decrease the refcount of the old table */
657         if (l2_offset) {
658             qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
659                                 QCOW2_DISCARD_OTHER);
660         }
661     }
662 
663     /* find the cluster offset for the given disk offset */
664 
665     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
666 
667     *new_l2_table = l2_table;
668     *new_l2_index = l2_index;
669 
670     return 0;
671 }
672 
673 /*
674  * alloc_compressed_cluster_offset
675  *
676  * For a given offset of the disk image, return the cluster offset in the
677  * qcow2 file.
678  *
679  * If the offset is not found, allocate a new compressed cluster.
680  *
681  * Returns the cluster offset if successful,
682  * or 0 otherwise.
683  *
684  */
685 
686 uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
687                                                uint64_t offset,
688                                                int compressed_size)
689 {
690     BDRVQcow2State *s = bs->opaque;
691     int l2_index, ret;
692     uint64_t *l2_table;
693     int64_t cluster_offset;
694     int nb_csectors;
695 
696     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
697     if (ret < 0) {
698         return 0;
699     }
700 
701     /* Compression can't overwrite anything. Fail if the cluster was already
702      * allocated. */
703     cluster_offset = be64_to_cpu(l2_table[l2_index]);
704     if (cluster_offset & L2E_OFFSET_MASK) {
705         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
706         return 0;
707     }
708 
709     cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
710     if (cluster_offset < 0) {
711         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
712         return 0;
713     }
714 
715     nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
716                   (cluster_offset >> 9);
717 
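        /* nb_csectors is the difference between the indexes of the last and
         * first 512-byte sectors touched by the compressed data;
         * qcow2_decompress_cluster() adds the one back when reading. */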
718     cluster_offset |= QCOW_OFLAG_COMPRESSED |
719                       ((uint64_t)nb_csectors << s->csize_shift);
720 
721     /* update L2 table */
722 
723     /* compressed clusters never have the copied flag */
724 
725     BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
726     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
727     l2_table[l2_index] = cpu_to_be64(cluster_offset);
728     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
729 
730     return cluster_offset;
731 }
732 
733 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
734 {
735     BDRVQcow2State *s = bs->opaque;
736     int ret;
737 
738     if (r->nb_sectors == 0) {
739         return 0;
740     }
741 
742     qemu_co_mutex_unlock(&s->lock);
743     ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
744                        r->offset / BDRV_SECTOR_SIZE,
745                        r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
746     qemu_co_mutex_lock(&s->lock);
747 
748     if (ret < 0) {
749         return ret;
750     }
751 
752     /*
753      * Before we update the L2 table to actually point to the new cluster, we
754      * need to be sure that the refcounts have been increased and COW was
755      * handled.
756      */
757     qcow2_cache_depends_on_flush(s->l2_table_cache);
758 
759     return 0;
760 }
761 
762 int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
763 {
764     BDRVQcow2State *s = bs->opaque;
765     int i, j = 0, l2_index, ret;
766     uint64_t *old_cluster, *l2_table;
767     uint64_t cluster_offset = m->alloc_offset;
768 
769     trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
770     assert(m->nb_clusters > 0);
771 
772     old_cluster = g_try_new(uint64_t, m->nb_clusters);
773     if (old_cluster == NULL) {
774         ret = -ENOMEM;
775         goto err;
776     }
777 
778     /* copy content of unmodified sectors */
779     ret = perform_cow(bs, m, &m->cow_start);
780     if (ret < 0) {
781         goto err;
782     }
783 
784     ret = perform_cow(bs, m, &m->cow_end);
785     if (ret < 0) {
786         goto err;
787     }
788 
789     /* Update L2 table. */
790     if (s->use_lazy_refcounts) {
791         qcow2_mark_dirty(bs);
792     }
793     if (qcow2_need_accurate_refcounts(s)) {
794         qcow2_cache_set_dependency(bs, s->l2_table_cache,
795                                    s->refcount_block_cache);
796     }
797 
798     ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
799     if (ret < 0) {
800         goto err;
801     }
802     qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
803 
804     assert(l2_index + m->nb_clusters <= s->l2_size);
805     for (i = 0; i < m->nb_clusters; i++) {
806         /* If two concurrent writes happen to the same unallocated cluster,
807          * each write allocates a separate cluster and writes the data
808          * concurrently. The first one to complete updates the L2 table with a
809          * pointer to its cluster; the second one has to do RMW (which is done
810          * above by copy_sectors()), update the L2 table with its own cluster
811          * pointer and free the old cluster. This is what this loop does. */
812         if (l2_table[l2_index + i] != 0)
813             old_cluster[j++] = l2_table[l2_index + i];
814 
815         l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
816                     (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
817     }
818 
819 
820     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
821 
822     /*
823      * If this was a COW, we need to decrease the refcount of the old cluster.
824      *
825      * Don't discard clusters that reach a refcount of 0 (e.g. compressed
826      * clusters), the next write will reuse them anyway.
827      */
828     if (j != 0) {
829         for (i = 0; i < j; i++) {
830             qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
831                                     QCOW2_DISCARD_NEVER);
832         }
833     }
834 
835     ret = 0;
836 err:
837     g_free(old_cluster);
838     return ret;
839 }
840 
841 /*
842  * Returns the number of contiguous clusters that can be used for an allocating
843  * write, but require COW to be performed (this includes yet unallocated space,
844  * whose contents must be copied from the backing file)
845  */
846 static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
847     uint64_t *l2_table, int l2_index)
848 {
849     int i;
850 
851     for (i = 0; i < nb_clusters; i++) {
852         uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
853         int cluster_type = qcow2_get_cluster_type(l2_entry);
854 
855         switch (cluster_type) {
856         case QCOW2_CLUSTER_NORMAL:
857             if (l2_entry & QCOW_OFLAG_COPIED) {
858                 goto out;
859             }
860             break;
861         case QCOW2_CLUSTER_UNALLOCATED:
862         case QCOW2_CLUSTER_COMPRESSED:
863         case QCOW2_CLUSTER_ZERO:
864             break;
865         default:
866             abort();
867         }
868     }
869 
870 out:
871     assert(i <= nb_clusters);
872     return i;
873 }
874 
875 /*
876  * Check if there already is an AIO write request in flight which allocates
877  * the same cluster. In this case we need to wait until the previous
878  * request has completed and updated the L2 table accordingly.
879  *
880  * Returns:
881  *   0       if there was no dependency. *cur_bytes indicates the number of
882  *           bytes from guest_offset that can be read before the next
883  *           dependency must be processed (or the request is complete)
884  *
885  *   -EAGAIN if we had to wait for another request, previously gathered
886  *           information on cluster allocation may be invalid now. The caller
887  *           must start over anyway, so consider *cur_bytes undefined.
888  */
889 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
890     uint64_t *cur_bytes, QCowL2Meta **m)
891 {
892     BDRVQcow2State *s = bs->opaque;
893     QCowL2Meta *old_alloc;
894     uint64_t bytes = *cur_bytes;
895 
896     QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
897 
898         uint64_t start = guest_offset;
899         uint64_t end = start + bytes;
900         uint64_t old_start = l2meta_cow_start(old_alloc);
901         uint64_t old_end = l2meta_cow_end(old_alloc);
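            /* The two half-open byte ranges [start, end) and
             * [old_start, old_end) overlap unless one ends before the other
             * begins. */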
902 
903         if (end <= old_start || start >= old_end) {
904             /* No intersection */
905         } else {
906             if (start < old_start) {
907                 /* Stop at the start of a running allocation */
908                 bytes = old_start - start;
909             } else {
910                 bytes = 0;
911             }
912 
913             /* Stop if already an l2meta exists. After yielding, it wouldn't
914              * be valid any more, so we'd have to clean up the old L2Metas
915              * and deal with requests depending on them before starting to
916              * gather new ones. Not worth the trouble. */
917             if (bytes == 0 && *m) {
918                 *cur_bytes = 0;
919                 return 0;
920             }
921 
922             if (bytes == 0) {
923                 /* Wait for the dependency to complete. We need to recheck
924                  * the free/allocated clusters when we continue. */
925                 qemu_co_mutex_unlock(&s->lock);
926                 qemu_co_queue_wait(&old_alloc->dependent_requests);
927                 qemu_co_mutex_lock(&s->lock);
928                 return -EAGAIN;
929             }
930         }
931     }
932 
933     /* Make sure that existing clusters and new allocations are only used up to
934      * the next dependency if we shortened the request above */
935     *cur_bytes = bytes;
936 
937     return 0;
938 }
939 
940 /*
941  * Checks how many clusters that are already allocated and don't require a
942  * copy on write are present at the given guest_offset (up to *bytes). If
943  * *host_offset is not zero, only physically contiguous clusters beginning at
944  * this host offset are counted.
945  *
946  * Note that guest_offset may not be cluster aligned. In this case, the
947  * returned *host_offset points to the exact byte referenced by guest_offset
948  * and therefore isn't cluster aligned either.
949  *
950  * Returns:
951  *   0:     if no allocated clusters are available at the given offset.
952  *          *bytes is normally unchanged. It is set to 0 if the cluster
953  *          is allocated and doesn't need COW, but doesn't have the right
954  *          physical offset.
955  *
956  *   1:     if allocated clusters that don't require a COW are available at
957  *          the requested offset. *bytes may have decreased and describes
958  *          the length of the area that can be written to.
959  *
960  *  -errno: in error cases
961  */
962 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
963     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
964 {
965     BDRVQcow2State *s = bs->opaque;
966     int l2_index;
967     uint64_t cluster_offset;
968     uint64_t *l2_table;
969     uint64_t nb_clusters;
970     unsigned int keep_clusters;
971     int ret;
972 
973     trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
974                               *bytes);
975 
976     assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
977                                 == offset_into_cluster(s, *host_offset));
978 
979     /*
980      * Calculate the number of clusters to look for. We stop at L2 table
981      * boundaries to keep things simple.
982      */
983     nb_clusters =
984         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
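        /* e.g. with 64 KiB clusters, a 100 KiB request starting 4 KiB into a
         * cluster touches ceil((4 + 100) / 64) = 2 clusters. */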
985 
986     l2_index = offset_to_l2_index(s, guest_offset);
987     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
988     assert(nb_clusters <= INT_MAX);
989 
990     /* Find L2 entry for the first involved cluster */
991     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
992     if (ret < 0) {
993         return ret;
994     }
995 
996     cluster_offset = be64_to_cpu(l2_table[l2_index]);
997 
998     /* Check how many clusters are already allocated and don't need COW */
999     if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
1000         && (cluster_offset & QCOW_OFLAG_COPIED))
1001     {
1002         /* If a specific host_offset is required, check it */
1003         bool offset_matches =
1004             (cluster_offset & L2E_OFFSET_MASK) == *host_offset;
1005 
1006         if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
1007             qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
1008                                     "%#llx unaligned (guest offset: %#" PRIx64
1009                                     ")", cluster_offset & L2E_OFFSET_MASK,
1010                                     guest_offset);
1011             ret = -EIO;
1012             goto out;
1013         }
1014 
1015         if (*host_offset != 0 && !offset_matches) {
1016             *bytes = 0;
1017             ret = 0;
1018             goto out;
1019         }
1020 
1021         /* We keep all QCOW_OFLAG_COPIED clusters */
1022         keep_clusters =
1023             count_contiguous_clusters(nb_clusters, s->cluster_size,
1024                                       &l2_table[l2_index],
1025                                       QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
1026         assert(keep_clusters <= nb_clusters);
1027 
1028         *bytes = MIN(*bytes,
1029                  keep_clusters * s->cluster_size
1030                  - offset_into_cluster(s, guest_offset));
1031 
1032         ret = 1;
1033     } else {
1034         ret = 0;
1035     }
1036 
1037     /* Cleanup */
1038 out:
1039     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1040 
1041     /* Only return a host offset if we actually made progress. Otherwise we
1042      * would make requirements for handle_alloc() that it can't fulfill */
1043     if (ret > 0) {
1044         *host_offset = (cluster_offset & L2E_OFFSET_MASK)
1045                      + offset_into_cluster(s, guest_offset);
1046     }
1047 
1048     return ret;
1049 }
1050 
1051 /*
1052  * Allocates new clusters for the given guest_offset.
1053  *
1054  * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
1055  * contain the number of clusters that have been allocated and are contiguous
1056  * in the image file.
1057  *
1058  * If *host_offset is non-zero, it specifies the offset in the image file at
1059  * which the new clusters must start. *nb_clusters can be 0 on return in this
1060  * case if the cluster at host_offset is already in use. If *host_offset is
1061  * zero, the clusters can be allocated anywhere in the image file.
1062  *
1063  * *host_offset is updated to contain the offset into the image file at which
1064  * the first allocated cluster starts.
1065  *
1066  * Return 0 on success and -errno in error cases. -EAGAIN means that the
1067  * function has been waiting for another request and the allocation must be
1068  * restarted, but the whole request should not be failed.
1069  */
1070 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
1071                                    uint64_t *host_offset, uint64_t *nb_clusters)
1072 {
1073     BDRVQcow2State *s = bs->opaque;
1074 
1075     trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
1076                                          *host_offset, *nb_clusters);
1077 
1078     /* Allocate new clusters */
1079     trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
1080     if (*host_offset == 0) {
1081         int64_t cluster_offset =
1082             qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
1083         if (cluster_offset < 0) {
1084             return cluster_offset;
1085         }
1086         *host_offset = cluster_offset;
1087         return 0;
1088     } else {
1089         int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
1090         if (ret < 0) {
1091             return ret;
1092         }
1093         *nb_clusters = ret;
1094         return 0;
1095     }
1096 }
1097 
1098 /*
1099  * Allocates new clusters for an area that either is not yet allocated or needs a
1100  * copy on write. If *host_offset is non-zero, clusters are only allocated if
1101  * the new allocation can match the specified host offset.
1102  *
1103  * Note that guest_offset may not be cluster aligned. In this case, the
1104  * returned *host_offset points to the exact byte referenced by guest_offset
1105  * and therefore isn't cluster aligned either.
1106  *
1107  * Returns:
1108  *   0:     if no clusters could be allocated. *bytes is set to 0,
1109  *          *host_offset is left unchanged.
1110  *
1111  *   1:     if new clusters were allocated. *bytes may be decreased if the
1112  *          new allocation doesn't cover all of the requested area.
1113  *          *host_offset is updated to contain the host offset of the first
1114  *          newly allocated cluster.
1115  *
1116  *  -errno: in error cases
1117  */
1118 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
1119     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1120 {
1121     BDRVQcow2State *s = bs->opaque;
1122     int l2_index;
1123     uint64_t *l2_table;
1124     uint64_t entry;
1125     uint64_t nb_clusters;
1126     int ret;
1127 
1128     uint64_t alloc_cluster_offset;
1129 
1130     trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
1131                              *bytes);
1132     assert(*bytes > 0);
1133 
1134     /*
1135      * Calculate the number of clusters to look for. We stop at L2 table
1136      * boundaries to keep things simple.
1137      */
1138     nb_clusters =
1139         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1140 
1141     l2_index = offset_to_l2_index(s, guest_offset);
1142     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1143     assert(nb_clusters <= INT_MAX);
1144 
1145     /* Find L2 entry for the first involved cluster */
1146     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
1147     if (ret < 0) {
1148         return ret;
1149     }
1150 
1151     entry = be64_to_cpu(l2_table[l2_index]);
1152 
1153     /* For the moment, overwrite compressed clusters one by one */
1154     if (entry & QCOW_OFLAG_COMPRESSED) {
1155         nb_clusters = 1;
1156     } else {
1157         nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
1158     }
1159 
1160     /* This function is only called when there were no non-COW clusters, so if
1161      * we can't find any unallocated or COW clusters either, something is
1162      * wrong with our code. */
1163     assert(nb_clusters > 0);
1164 
1165     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1166 
1167     /* Allocate, if necessary, at a given offset in the image file */
1168     alloc_cluster_offset = start_of_cluster(s, *host_offset);
1169     ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
1170                                   &nb_clusters);
1171     if (ret < 0) {
1172         goto fail;
1173     }
1174 
1175     /* Can't extend contiguous allocation */
1176     if (nb_clusters == 0) {
1177         *bytes = 0;
1178         return 0;
1179     }
1180 
1181     /* !*host_offset would overwrite the image header and is reserved for "no
1182      * host offset preferred". If 0 was a valid host offset, it'd trigger the
1183      * following overlap check; do that now to avoid having an invalid value in
1184      * *host_offset. */
1185     if (!alloc_cluster_offset) {
1186         ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
1187                                             nb_clusters * s->cluster_size);
1188         assert(ret < 0);
1189         goto fail;
1190     }
1191 
1192     /*
1193      * Save info needed for the metadata update.
1194      *
1195      * requested_sectors: Number of sectors from the start of the first
1196      * newly allocated cluster to the end of the (possibly shortened
1197      * above) write request.
1198      *
1199      * avail_sectors: Number of sectors from the start of the first
1200      * newly allocated to the end of the last newly allocated cluster.
1201      *
1202      * nb_sectors: The number of sectors from the start of the first
1203      * newly allocated cluster to the end of the area that the write
1204      * request actually writes to (excluding COW at the end)
1205      */
1206     int requested_sectors =
1207         (*bytes + offset_into_cluster(s, guest_offset))
1208         >> BDRV_SECTOR_BITS;
1209     int avail_sectors = nb_clusters
1210                         << (s->cluster_bits - BDRV_SECTOR_BITS);
1211     int alloc_n_start = offset_into_cluster(s, guest_offset)
1212                         >> BDRV_SECTOR_BITS;
1213     int nb_sectors = MIN(requested_sectors, avail_sectors);
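         /* Example with 64 KiB clusters: a 20 KiB write starting 8 KiB into an
          * otherwise unallocated cluster gives alloc_n_start = 16 sectors of
          * head COW, requested_sectors = 56, avail_sectors = 128 and
          * nb_sectors = 56; the remaining 72 sectors become the tail COW region
          * (cow_end). */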
1214     QCowL2Meta *old_m = *m;
1215 
1216     *m = g_malloc0(sizeof(**m));
1217 
1218     **m = (QCowL2Meta) {
1219         .next           = old_m,
1220 
1221         .alloc_offset   = alloc_cluster_offset,
1222         .offset         = start_of_cluster(s, guest_offset),
1223         .nb_clusters    = nb_clusters,
1224         .nb_available   = nb_sectors,
1225 
1226         .cow_start = {
1227             .offset     = 0,
1228             .nb_sectors = alloc_n_start,
1229         },
1230         .cow_end = {
1231             .offset     = nb_sectors * BDRV_SECTOR_SIZE,
1232             .nb_sectors = avail_sectors - nb_sectors,
1233         },
1234     };
1235     qemu_co_queue_init(&(*m)->dependent_requests);
1236     QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
1237 
1238     *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
1239     *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
1240                          - offset_into_cluster(s, guest_offset));
1241     assert(*bytes != 0);
1242 
1243     return 1;
1244 
1245 fail:
1246     if (*m && (*m)->nb_clusters > 0) {
1247         QLIST_REMOVE(*m, next_in_flight);
1248     }
1249     return ret;
1250 }
1251 
1252 /*
1253  * alloc_cluster_offset
1254  *
1255  * For a given offset on the virtual disk, find the cluster offset in qcow2
1256  * file. If the offset is not found, allocate a new cluster.
1257  *
1258  * If the cluster was already allocated, m->nb_clusters is set to 0 and
1259  * other fields in m are meaningless.
1260  *
1261  * If the cluster is newly allocated, m->nb_clusters is set to the number of
1262  * contiguous clusters that have been allocated. In this case, the other
1263  * fields of m are valid and contain information about the first allocated
1264  * cluster.
1265  *
1266  * If the request conflicts with another write request in flight, the coroutine
1267  * is queued and will be reentered when the dependency has completed.
1268  *
1269  * Return 0 on success and -errno in error cases
1270  */
1271 int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
1272     int *num, uint64_t *host_offset, QCowL2Meta **m)
1273 {
1274     BDRVQcow2State *s = bs->opaque;
1275     uint64_t start, remaining;
1276     uint64_t cluster_offset;
1277     uint64_t cur_bytes;
1278     int ret;
1279 
1280     trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);
1281 
1282     assert((offset & ~BDRV_SECTOR_MASK) == 0);
1283 
1284 again:
1285     start = offset;
1286     remaining = (uint64_t)*num << BDRV_SECTOR_BITS;
1287     cluster_offset = 0;
1288     *host_offset = 0;
1289     cur_bytes = 0;
1290     *m = NULL;
1291 
1292     while (true) {
1293 
1294         if (!*host_offset) {
1295             *host_offset = start_of_cluster(s, cluster_offset);
1296         }
1297 
1298         assert(remaining >= cur_bytes);
1299 
1300         start           += cur_bytes;
1301         remaining       -= cur_bytes;
1302         cluster_offset  += cur_bytes;
1303 
1304         if (remaining == 0) {
1305             break;
1306         }
1307 
1308         cur_bytes = remaining;
1309 
1310         /*
1311          * Now start gathering as many contiguous clusters as possible:
1312          *
1313          * 1. Check for overlaps with in-flight allocations
1314          *
1315          *      a) Overlap not in the first cluster -> shorten this request and
1316          *         let the caller handle the rest in its next loop iteration.
1317          *
1318          *      b) Real overlaps of two requests. Yield and restart the search
1319          *         for contiguous clusters (the situation could have changed
1320          *         while we were sleeping)
1321          *
1322          *      c) TODO: Request starts in the same cluster as the in-flight
1323          *         allocation ends. Shorten the COW of the in-flight allocation,
1324          *         set cluster_offset to write to the same cluster and set up
1325          *         the right synchronisation between the in-flight request and
1326          *         the new one.
1327          */
1328         ret = handle_dependencies(bs, start, &cur_bytes, m);
1329         if (ret == -EAGAIN) {
1330             /* Currently handle_dependencies() doesn't yield if we already had
1331              * an allocation. If it did, we would have to clean up the L2Meta
1332              * structs before starting over. */
1333             assert(*m == NULL);
1334             goto again;
1335         } else if (ret < 0) {
1336             return ret;
1337         } else if (cur_bytes == 0) {
1338             break;
1339         } else {
1340             /* handle_dependencies() may have decreased cur_bytes (shortened
1341              * the allocations below) so that the next dependency is processed
1342              * correctly during the next loop iteration. */
1343         }
1344 
1345         /*
1346          * 2. Count contiguous COPIED clusters.
1347          */
1348         ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
1349         if (ret < 0) {
1350             return ret;
1351         } else if (ret) {
1352             continue;
1353         } else if (cur_bytes == 0) {
1354             break;
1355         }
1356 
1357         /*
1358          * 3. If the request still hasn't completed, allocate new clusters,
1359          *    considering any cluster_offset of steps 1c or 2.
1360          */
1361         ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
1362         if (ret < 0) {
1363             return ret;
1364         } else if (ret) {
1365             continue;
1366         } else {
1367             assert(cur_bytes == 0);
1368             break;
1369         }
1370     }
1371 
1372     *num -= remaining >> BDRV_SECTOR_BITS;
1373     assert(*num > 0);
1374     assert(*host_offset != 0);
1375 
1376     return 0;
1377 }
1378 
1379 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
1380                              const uint8_t *buf, int buf_size)
1381 {
1382     z_stream strm1, *strm = &strm1;
1383     int ret, out_len;
1384 
1385     memset(strm, 0, sizeof(*strm));
1386 
1387     strm->next_in = (uint8_t *)buf;
1388     strm->avail_in = buf_size;
1389     strm->next_out = out_buf;
1390     strm->avail_out = out_buf_size;
1391 
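         /* A negative windowBits value selects raw deflate data without a zlib
          * header; qcow2 compresses clusters with a 12-bit window. */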
1392     ret = inflateInit2(strm, -12);
1393     if (ret != Z_OK)
1394         return -1;
1395     ret = inflate(strm, Z_FINISH);
1396     out_len = strm->next_out - out_buf;
1397     if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
1398         out_len != out_buf_size) {
1399         inflateEnd(strm);
1400         return -1;
1401     }
1402     inflateEnd(strm);
1403     return 0;
1404 }
1405 
1406 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
1407 {
1408     BDRVQcow2State *s = bs->opaque;
1409     int ret, csize, nb_csectors, sector_offset;
1410     uint64_t coffset;
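         /* A compressed cluster descriptor packs the host byte offset of the
          * data in its low bits and the number of additional 512-byte sectors
          * in the bits starting at csize_shift. */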
1411 
1412     coffset = cluster_offset & s->cluster_offset_mask;
1413     if (s->cluster_cache_offset != coffset) {
1414         nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
1415         sector_offset = coffset & 511;
1416         csize = nb_csectors * 512 - sector_offset;
1417         BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
1418         ret = bdrv_read(bs->file->bs, coffset >> 9, s->cluster_data,
1419                         nb_csectors);
1420         if (ret < 0) {
1421             return ret;
1422         }
1423         if (decompress_buffer(s->cluster_cache, s->cluster_size,
1424                               s->cluster_data + sector_offset, csize) < 0) {
1425             return -EIO;
1426         }
1427         s->cluster_cache_offset = coffset;
1428     }
1429     return 0;
1430 }
1431 
1432 /*
1433  * This discards as many clusters of nb_clusters as possible at once (i.e.
1434  * all clusters in the same L2 table) and returns the number of discarded
1435  * clusters.
1436  */
1437 static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
1438                              uint64_t nb_clusters, enum qcow2_discard_type type,
1439                              bool full_discard)
1440 {
1441     BDRVQcow2State *s = bs->opaque;
1442     uint64_t *l2_table;
1443     int l2_index;
1444     int ret;
1445     int i;
1446 
1447     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1448     if (ret < 0) {
1449         return ret;
1450     }
1451 
1452     /* Limit nb_clusters to one L2 table */
1453     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1454     assert(nb_clusters <= INT_MAX);
1455 
1456     for (i = 0; i < nb_clusters; i++) {
1457         uint64_t old_l2_entry;
1458 
1459         old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);
1460 
1461         /*
1462          * If full_discard is false, make sure that a discarded area reads back
1463          * as zeroes for v3 images (we cannot do it for v2 without actually
1464          * writing a zero-filled buffer). We can skip the operation if the
1465          * cluster is already marked as zero, or if it's unallocated and we
1466          * don't have a backing file.
1467          *
1468          * TODO We might want to use bdrv_get_block_status(bs) here, but we're
1469          * holding s->lock, so that doesn't work today.
1470          *
1471          * If full_discard is true, the sector should not read back as zeroes,
1472          * but rather fall through to the backing file.
1473          */
1474         switch (qcow2_get_cluster_type(old_l2_entry)) {
1475             case QCOW2_CLUSTER_UNALLOCATED:
1476                 if (full_discard || !bs->backing) {
1477                     continue;
1478                 }
1479                 break;
1480 
1481             case QCOW2_CLUSTER_ZERO:
1482                 if (!full_discard) {
1483                     continue;
1484                 }
1485                 break;
1486 
1487             case QCOW2_CLUSTER_NORMAL:
1488             case QCOW2_CLUSTER_COMPRESSED:
1489                 break;
1490 
1491             default:
1492                 abort();
1493         }
1494 
1495         /* First remove L2 entries */
1496         qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1497         if (!full_discard && s->qcow_version >= 3) {
1498             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1499         } else {
1500             l2_table[l2_index + i] = cpu_to_be64(0);
1501         }
1502 
1503         /* Then decrease the refcount */
1504         qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
1505     }
1506 
1507     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1508 
1509     return nb_clusters;
1510 }
1511 
1512 int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
1513     int nb_sectors, enum qcow2_discard_type type, bool full_discard)
1514 {
1515     BDRVQcow2State *s = bs->opaque;
1516     uint64_t end_offset;
1517     uint64_t nb_clusters;
1518     int ret;
1519 
1520     end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);
1521 
1522     /* Round start up and end down */
1523     offset = align_offset(offset, s->cluster_size);
1524     end_offset = start_of_cluster(s, end_offset);
1525 
1526     if (offset > end_offset) {
1527         return 0;
1528     }
1529 
1530     nb_clusters = size_to_clusters(s, end_offset - offset);
1531 
1532     s->cache_discards = true;
1533 
1534     /* Each L2 table is handled by its own loop iteration */
1535     while (nb_clusters > 0) {
1536         ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
1537         if (ret < 0) {
1538             goto fail;
1539         }
1540 
1541         nb_clusters -= ret;
1542         offset += (ret * s->cluster_size);
1543     }
1544 
1545     ret = 0;
1546 fail:
1547     s->cache_discards = false;
1548     qcow2_process_discards(bs, ret);
1549 
1550     return ret;
1551 }
1552 
1553 /*
1554  * This zeroes as many clusters of nb_clusters as possible at once (i.e.
1555  * all clusters in the same L2 table) and returns the number of zeroed
1556  * clusters.
1557  */
1558 static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
1559                           uint64_t nb_clusters)
1560 {
1561     BDRVQcow2State *s = bs->opaque;
1562     uint64_t *l2_table;
1563     int l2_index;
1564     int ret;
1565     int i;
1566 
1567     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1568     if (ret < 0) {
1569         return ret;
1570     }
1571 
1572     /* Limit nb_clusters to one L2 table */
1573     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1574     assert(nb_clusters <= INT_MAX);
1575 
1576     for (i = 0; i < nb_clusters; i++) {
1577         uint64_t old_offset;
1578 
1579         old_offset = be64_to_cpu(l2_table[l2_index + i]);
1580 
1581         /* Update L2 entries */
1582         qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
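             /* For compressed clusters the entry is replaced by a standalone
              * zero entry and the compressed data is freed; normal clusters
              * keep their allocation and only gain the zero flag. */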
1583         if (old_offset & QCOW_OFLAG_COMPRESSED) {
1584             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1585             qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
1586         } else {
1587             l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
1588         }
1589     }
1590 
1591     qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1592 
1593     return nb_clusters;
1594 }
1595 
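/*
 * Marks all clusters covered by the given sector range with the zero flag so
 * that they read back as zeroes.
 */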
1596 int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
1597 {
1598     BDRVQcow2State *s = bs->opaque;
1599     uint64_t nb_clusters;
1600     int ret;
1601 
1602     /* The zero flag is only supported by version 3 and newer */
1603     if (s->qcow_version < 3) {
1604         return -ENOTSUP;
1605     }
1606 
1607     /* Each L2 table is handled by its own loop iteration */
1608     nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);
1609 
1610     s->cache_discards = true;
1611 
1612     while (nb_clusters > 0) {
1613         ret = zero_single_l2(bs, offset, nb_clusters);
1614         if (ret < 0) {
1615             goto fail;
1616         }
1617 
1618         nb_clusters -= ret;
1619         offset += (ret * s->cluster_size);
1620     }
1621 
1622     ret = 0;
1623 fail:
1624     s->cache_discards = false;
1625     qcow2_process_discards(bs, ret);
1626 
1627     return ret;
1628 }
1629 
1630 /*
1631  * Expands all zero clusters in a specific L1 table (or deallocates them, for
1632  * non-backed non-pre-allocated zero clusters).
1633  *
1634  * l1_entries and *visited_l1_entries are used to keep track of progress for
1635  * status_cb(). l1_entries contains the total number of L1 entries and
1636  * *visited_l1_entries counts all visited L1 entries.
1637  */
1638 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
1639                                       int l1_size, int64_t *visited_l1_entries,
1640                                       int64_t l1_entries,
1641                                       BlockDriverAmendStatusCB *status_cb)
1642 {
1643     BDRVQcow2State *s = bs->opaque;
1644     bool is_active_l1 = (l1_table == s->l1_table);
1645     uint64_t *l2_table = NULL;
1646     int ret;
1647     int i, j;
1648 
1649     if (!is_active_l1) {
1650         /* inactive L2 tables are not kept in the L2 table cache, so they
1651          * need a local buffer to be loaded into from disk */
1652         l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
1653         if (l2_table == NULL) {
1654             return -ENOMEM;
1655         }
1656     }
1657 
1658     for (i = 0; i < l1_size; i++) {
1659         uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
1660         bool l2_dirty = false;
1661         uint64_t l2_refcount;
1662 
1663         if (!l2_offset) {
1664             /* unallocated */
1665             (*visited_l1_entries)++;
1666             if (status_cb) {
1667                 status_cb(bs, *visited_l1_entries, l1_entries);
1668             }
1669             continue;
1670         }
1671 
1672         if (offset_into_cluster(s, l2_offset)) {
1673             qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
1674                                     PRIx64 " unaligned (L1 index: %#x)",
1675                                     l2_offset, i);
1676             ret = -EIO;
1677             goto fail;
1678         }
1679 
1680         if (is_active_l1) {
1681             /* get active L2 tables from cache */
1682             ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
1683                     (void **)&l2_table);
1684         } else {
1685             /* load inactive L2 tables from disk */
1686             ret = bdrv_read(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
1687                             (void *)l2_table, s->cluster_sectors);
1688         }
1689         if (ret < 0) {
1690             goto fail;
1691         }
1692 
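        /* The L2 table's refcount is needed below: clusters allocated while
         * expanding entries of a shared L2 table must inherit that refcount,
         * and OFLAG_COPIED may only be set if it is exactly 1. */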
1693         ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
1694                                  &l2_refcount);
1695         if (ret < 0) {
1696             goto fail;
1697         }
1698 
1699         for (j = 0; j < s->l2_size; j++) {
1700             uint64_t l2_entry = be64_to_cpu(l2_table[j]);
1701             int64_t offset = l2_entry & L2E_OFFSET_MASK;
1702             int cluster_type = qcow2_get_cluster_type(l2_entry);
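            /* A non-zero offset means this zero cluster still has a
             * preallocated data cluster attached to it. */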
1703             bool preallocated = offset != 0;
1704 
1705             if (cluster_type != QCOW2_CLUSTER_ZERO) {
1706                 continue;
1707             }
1708 
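            /* Zero clusters without a preallocated data cluster either get
             * dropped (no backing file) or get a freshly allocated cluster
             * that will be zeroed below, so that the backing file's contents
             * cannot show through. */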
1709             if (!preallocated) {
1710                 if (!bs->backing) {
1711                     /* not backed; therefore we can simply deallocate the
1712                      * cluster */
1713                     l2_table[j] = 0;
1714                     l2_dirty = true;
1715                     continue;
1716                 }
1717 
1718                 offset = qcow2_alloc_clusters(bs, s->cluster_size);
1719                 if (offset < 0) {
1720                     ret = offset;
1721                     goto fail;
1722                 }
1723 
1724                 if (l2_refcount > 1) {
1725                     /* For shared L2 tables, set the refcount accordingly (it is
1726                      * already 1 and needs to be l2_refcount) */
1727                     ret = qcow2_update_cluster_refcount(bs,
1728                             offset >> s->cluster_bits,
1729                             refcount_diff(1, l2_refcount), false,
1730                             QCOW2_DISCARD_OTHER);
1731                     if (ret < 0) {
1732                         qcow2_free_clusters(bs, offset, s->cluster_size,
1733                                             QCOW2_DISCARD_OTHER);
1734                         goto fail;
1735                     }
1736                 }
1737             }
1738 
1739             if (offset_into_cluster(s, offset)) {
1740                 qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
1741                                         "%#" PRIx64 " unaligned (L2 offset: %#"
1742                                         PRIx64 ", L2 index: %#x)", offset,
1743                                         l2_offset, j);
1744                 if (!preallocated) {
1745                     qcow2_free_clusters(bs, offset, s->cluster_size,
1746                                         QCOW2_DISCARD_ALWAYS);
1747                 }
1748                 ret = -EIO;
1749                 goto fail;
1750             }
1751 
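            /* Make sure the zeroing write below cannot clobber any qcow2
             * metadata structures. */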
1752             ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
1753             if (ret < 0) {
1754                 if (!preallocated) {
1755                     qcow2_free_clusters(bs, offset, s->cluster_size,
1756                                         QCOW2_DISCARD_ALWAYS);
1757                 }
1758                 goto fail;
1759             }
1760 
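            /* Once the zero flag is dropped from the L2 entry, the data
             * cluster's contents become guest-visible again, so it has to be
             * filled with actual zeroes on disk. */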
1761             ret = bdrv_write_zeroes(bs->file->bs, offset / BDRV_SECTOR_SIZE,
1762                                     s->cluster_sectors, 0);
1763             if (ret < 0) {
1764                 if (!preallocated) {
1765                     qcow2_free_clusters(bs, offset, s->cluster_size,
1766                                         QCOW2_DISCARD_ALWAYS);
1767                 }
1768                 goto fail;
1769             }
1770 
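            /* OFLAG_COPIED may only be set if the cluster's refcount is
             * exactly 1; clusters reached through a shared L2 table have
             * refcount l2_refcount and must not carry the flag. */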
1771             if (l2_refcount == 1) {
1772                 l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
1773             } else {
1774                 l2_table[j] = cpu_to_be64(offset);
1775             }
1776             l2_dirty = true;
1777         }
1778 
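        /* Write the modified L2 table back: active tables go through the L2
         * table cache, inactive tables are written directly to disk (with an
         * overlap check first, since the cache is bypassed). */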
1779         if (is_active_l1) {
1780             if (l2_dirty) {
1781                 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
1782                 qcow2_cache_depends_on_flush(s->l2_table_cache);
1783             }
1784             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1785         } else {
1786             if (l2_dirty) {
1787                 ret = qcow2_pre_write_overlap_check(bs,
1788                         QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
1789                         s->cluster_size);
1790                 if (ret < 0) {
1791                     goto fail;
1792                 }
1793 
1794                 ret = bdrv_write(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
1795                                  (void *)l2_table, s->cluster_sectors);
1796                 if (ret < 0) {
1797                     goto fail;
1798                 }
1799             }
1800         }
1801 
1802         (*visited_l1_entries)++;
1803         if (status_cb) {
1804             status_cb(bs, *visited_l1_entries, l1_entries);
1805         }
1806     }
1807 
1808     ret = 0;
1809 
1810 fail:
1811     if (l2_table) {
1812         if (!is_active_l1) {
1813             qemu_vfree(l2_table);
1814         } else {
1815             qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
1816         }
1817     }
1818     return ret;
1819 }
1820 
1821 /*
1822  * For backed images, expands all zero clusters in the image. For non-backed
1823  * images, deallocates all non-pre-allocated zero clusters (and claims the
1824  * allocation for pre-allocated ones). This is important for downgrading to a
1825  * qcow2 version which doesn't yet support metadata zero clusters.
1826  */
1827 int qcow2_expand_zero_clusters(BlockDriverState *bs,
1828                                BlockDriverAmendStatusCB *status_cb)
1829 {
1830     BDRVQcow2State *s = bs->opaque;
1831     uint64_t *l1_table = NULL;
1832     int64_t l1_entries = 0, visited_l1_entries = 0;
1833     int ret;
1834     int i, j;
1835 
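    /* The total number of L1 entries (active table plus all snapshots) is only
     * needed for progress reporting via status_cb. */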
1836     if (status_cb) {
1837         l1_entries = s->l1_size;
1838         for (i = 0; i < s->nb_snapshots; i++) {
1839             l1_entries += s->snapshots[i].l1_size;
1840         }
1841     }
1842 
1843     ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
1844                                      &visited_l1_entries, l1_entries,
1845                                      status_cb);
1846     if (ret < 0) {
1847         goto fail;
1848     }
1849 
1850     /* Inactive L1 tables may point to active L2 tables - therefore it is
1851      * necessary to flush the L2 table cache before trying to access the L2
1852      * tables pointed to by inactive L1 entries (else we might try to expand
1853      * zero clusters that have already been expanded); furthermore, it is also
1854      * necessary to empty the L2 table cache, since it may contain tables which
1855      * are now going to be modified directly on disk, bypassing the cache.
1856      * qcow2_cache_empty() does both for us. */
1857     ret = qcow2_cache_empty(bs, s->l2_table_cache);
1858     if (ret < 0) {
1859         goto fail;
1860     }
1861 
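    /* Now do the same for the L1 tables of all internal snapshots; these are
     * inactive, so each one is read from disk into a temporary buffer and
     * converted from big-endian to host byte order first. */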
1862     for (i = 0; i < s->nb_snapshots; i++) {
1863         int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
1864                 BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;
1865 
1866         l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);
1867 
1868         ret = bdrv_read(bs->file->bs,
1869                         s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
1870                         (void *)l1_table, l1_sectors);
1871         if (ret < 0) {
1872             goto fail;
1873         }
1874 
1875         for (j = 0; j < s->snapshots[i].l1_size; j++) {
1876             be64_to_cpus(&l1_table[j]);
1877         }
1878 
1879         ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
1880                                          &visited_l1_entries, l1_entries,
1881                                          status_cb);
1882         if (ret < 0) {
1883             goto fail;
1884         }
1885     }
1886 
1887     ret = 0;
1888 
1889 fail:
1890     g_free(l1_table);
1891     return ret;
1892 }
1893