/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
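        /* e.g. growing a 32-entry table to cover min_size = 70:
         * 32 -> 48 -> 72, one 3/2 step per loop iteration */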
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table,
                           new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Stores a pointer to the L2 table in *l2_table and returns 0 on success,
 * or -errno if reading from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

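    /* Round l1_index down to the start of the 512-byte sector that contains
     * it, e.g. l1_index 70 -> l1_start_index 64; the whole sector is then
     * rewritten below. */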
    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocates a new L2 table in the image file. If l1_index points to an
 * already used entry in the L1 table (i.e. we are doing a copy on write
 * for the L2 table), the contents of the old L2 table are copied into the
 * newly allocated one. Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
        (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
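    /* The mask keeps the host offset bits plus the stop_flags; the run ends
     * as soon as either deviates from the pattern set by the first entry. */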
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset) {
        return 0;
    }

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters,
    uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
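/* The IV for each 512-byte sector is the sector number as a 64-bit
 * little-endian integer, zero-padded to 16 bytes (plain CBC; no ESSIV). */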
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface.  This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        iov.iov_base, iov.iov_base, n, 1,
                        &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
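/* Index arithmetic used below, assuming for illustration 64 KiB clusters
 * (cluster_bits = 16) and 8192-entry L2 tables (l2_bits = 13):
 *
 *   l1_index = offset >> (l2_bits + cluster_bits)        -- bits 29 and up
 *   l2_index = (offset >> cluster_bits) & (l2_size - 1)  -- bits 16..28
 *   index_in_cluster = (offset >> 9) & (cluster_sectors - 1)
 *
 * so each L1 entry then covers 1 << (13 + 16) = 512 MiB of guest space.
 */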
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the L2 table.
 *
 * The L2 table (in the cache) and the cluster index
 * within it are returned to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in
 * the qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful;
 * returns 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);
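    /* Note: this is the number of 512-byte sectors touched *beyond* the
     * first one; qcow2_decompress_cluster() adds 1 back when reading.
     * e.g. 700 bytes at host offset 0: (699 >> 9) - (0 >> 9) = 1, and
     * reading the data back uses 1 + 1 = 2 sectors. */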

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);
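        /* [start, end) and [old_start, old_end) are half-open byte ranges;
         * they overlap unless one of them ends before the other begins. */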

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], 0,
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /*
     * Save info needed for metadata update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened)
     * write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
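    /* The head [0, alloc_n_start) and tail [nb_sectors, avail_sectors) of
     * the allocated area are not covered by the write request and become
     * the two COW regions filled in below. */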
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
    offset = start_of_cluster(s, offset);

again:
    start = offset + (n_start << BDRV_SECTOR_BITS);
    remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

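    /* Negative windowBits selects raw deflate data without the zlib
     * header/trailer, which is how qcow2 clusters are compressed. */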
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
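        /* The stored sector count is one less than the number of sectors
         * actually touched; see qcow2_alloc_compressed_cluster_offset(). */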
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many of the nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}

/*
 * This zeroes as many of the nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}
1458