/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}
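
/*
 * A worked example of the growth heuristic above (illustrative only, not
 * part of the driver): starting from l1_size == 1 and asking for
 * min_size == 5 with exact_size == false, new_l1_size evolves as
 *
 *     (1 * 3 + 1) / 2 == 2
 *     (2 * 3 + 1) / 2 == 3
 *     (3 * 3 + 1) / 2 == 5
 *
 * The roughly 1.5x growth factor keeps the number of (expensive) table
 * relocations logarithmic in the final L1 size.
 */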

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success; on failure, a negative errno value is returned and
 * *l2_table is not valid.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}
126 
127 /*
128  * Writes one sector of the L1 table to the disk (can't update single entries
129  * and we really don't want bdrv_pread to perform a read-modify-write)
130  */
131 #define L1_ENTRIES_PER_SECTOR (512 / 8)
132 static int write_l1_entry(BlockDriverState *bs, int l1_index)
133 {
134     BDRVQcowState *s = bs->opaque;
135     uint64_t buf[L1_ENTRIES_PER_SECTOR];
136     int l1_start_index;
137     int i, ret;
138 
139     l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
140     for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
141         buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
142     }
143 
144     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
145     ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
146         buf, sizeof(buf));
147     if (ret < 0) {
148         return ret;
149     }
150 
151     return 0;
152 }
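
/*
 * Illustration (hypothetical values): with L1_ENTRIES_PER_SECTOR == 64,
 * updating l1_index == 70 rounds down to l1_start_index == 64, so the
 * 512-byte write covers entries 64..127 and lands at byte offset
 * s->l1_table_offset + 8 * 64 in the image file. Writing a whole sector at
 * once is what lets the function avoid a read-modify-write cycle.
 */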

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
        (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}
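
/*
 * Note on ordering (editorial): the new L2 table is flushed to disk before
 * the L1 entry is rewritten to point at it, so a crash between the two
 * steps leaves the old, still-valid L1 entry in place. The same
 * write-the-referee-before-the-reference pattern recurs throughout this
 * file.
 */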

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset) {
        return 0;
    }

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}
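
/*
 * Example (hypothetical numbers): with cluster_size == 64 KiB and L2
 * entries pointing at host offsets 0x50000, 0x60000 and 0x80000, the first
 * two clusters are contiguous but the third is not, so a call starting at
 * index 0 returns 2. A differing stop flag (e.g. QCOW_OFLAG_COMPRESSED)
 * terminates the run in the same way, because it changes the masked value
 * being compared.
 */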

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
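
/*
 * The IV above is just the 512-byte sector number, stored little-endian in
 * the first 8 bytes of a 16-byte block with the remaining 8 bytes zeroed.
 * For instance (illustrative only), sector_num == 0x1234 gives the IV
 * bytes 34 12 00 00 00 00 00 00 00 00 00 00 00 00 00 00. This is what
 * makes the scheme compatible with Linux cryptoloop for images below
 * 4 GB.
 */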

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface.  This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        iov.iov_base, iov.iov_base, n, 1,
                        &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;
}
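
/*
 * A minimal caller sketch (illustrative only; error handling trimmed and
 * the surrounding locking omitted):
 *
 *     uint64_t cluster_offset;
 *     int nb = 16;    // want up to 16 contiguous sectors
 *     int type = qcow2_get_cluster_offset(bs, guest_offset, &nb,
 *                                         &cluster_offset);
 *     if (type == QCOW2_CLUSTER_NORMAL) {
 *         // nb sectors are readable from bs->file starting at
 *         // cluster_offset + index_in_cluster * 512
 *     }
 */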

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * The l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, or 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
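
/*
 * Worked example for the sector count above (hypothetical numbers): with
 * compressed_size == 1000 at cluster_offset == 0x10100, the data spans
 * host sectors 128..130, and
 *
 *     nb_csectors == ((0x10100 + 999) >> 9) - (0x10100 >> 9)
 *                 == 130 - 128 == 2
 *
 * The reader adds one back (see qcow2_decompress_cluster() below), so all
 * three sectors get fetched.
 */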

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
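
/*
 * By example (hypothetical offsets): a request for bytes
 * [100 KiB, 200 KiB) that finds an in-flight allocation covering
 * [150 KiB, 180 KiB) is shortened to bytes == 50 KiB, proceeds with the
 * conflict-free prefix, and leaves the rest for the caller's next loop
 * iteration. If the conflict begins at the first byte, bytes drops to 0
 * and the request either returns early (when an L2Meta has already been
 * gathered) or sleeps on dependent_requests and restarts via -EAGAIN.
 */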

/*
 * Checks how many clusters that don't require a copy on write are already
 * allocated at the given guest_offset (up to *bytes). If *host_offset is
 * not zero, only physically contiguous clusters beginning at this host
 * offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], 0,
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that is either yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /*
     * Save info needed for the metadata update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly already
     * shortened) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the last newly allocated
     * cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
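
/*
 * Example of the COW bookkeeping above (hypothetical numbers, 64 KiB
 * clusters): a 4 KiB write at guest offset 130 KiB allocates one cluster.
 * offset_into_cluster() is 2 KiB, so alloc_n_start == 4 sectors;
 * requested_sectors == (4 KiB + 2 KiB) / 512 == 12; avail_sectors == 128;
 * nb_sectors == MIN(12, 128) == 12. cow_start then covers sectors 0..3 and
 * cow_end covers sectors 12..127, i.e. exactly the parts of the new
 * cluster that the guest write itself does not touch.
 */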

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
    offset = start_of_cluster(s, offset);

again:
    start = offset + (n_start << BDRV_SECTOR_BITS);
    remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}
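
/*
 * Typical write-path usage (a sketch, not the actual qcow2_co_writev()
 * code; error handling and the dependent-request wakeup are trimmed):
 *
 *     QCowL2Meta *l2meta = NULL;
 *     ret = qcow2_alloc_cluster_offset(bs, offset, n_start, n_end,
 *                                      &cur_nr_sectors, &host_offset,
 *                                      &l2meta);
 *     // ... write the guest data to host_offset in bs->file ...
 *     while (l2meta != NULL) {
 *         QCowL2Meta *next = l2meta->next;
 *         qcow2_alloc_cluster_link_l2(bs, l2meta);  // COW + L2 update
 *         g_free(l2meta);
 *         l2meta = next;
 *     }
 */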

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
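
/*
 * Note on the zlib parameters above: the negative windowBits argument in
 * inflateInit2(strm, -12) selects a raw deflate stream (no zlib header or
 * checksum) with a 2^12-byte window, matching how qcow2 compresses
 * clusters. Z_BUF_ERROR is tolerated because the input buffer is
 * sector-granular and may contain trailing bytes beyond the end of the
 * actual compressed stream.
 */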

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
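
/*
 * Example of the descriptor decoding above (hypothetical values,
 * continuing the one after qcow2_alloc_compressed_cluster_offset()): for
 * coffset == 0x10100 and a stored sector count of 2, nb_csectors == 3,
 * sector_offset == 0x10100 & 511 == 256, and csize == 3 * 512 - 256 ==
 * 1280, so sectors 128..130 are read and the stream starting 256 bytes
 * into the buffer is inflated into s->cluster_cache.
 */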

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}
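
/*
 * Example of the rounding above (hypothetical request, 64 KiB clusters):
 * a discard covering bytes [10 KiB, 200 KiB) is trimmed inward to
 * [64 KiB, 192 KiB), i.e. exactly two whole clusters. The partial
 * clusters at either end are deliberately left untouched, since
 * discarding them would throw away guest data outside the request.
 */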

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}
1451