xref: /openbmc/qemu/block/qcow2-cluster.c (revision a42e9c41)
1 /*
2  * Block driver for the QCOW version 2 format
3  *
4  * Copyright (c) 2004-2006 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include <zlib.h>
26 
27 #include "qemu-common.h"
28 #include "block/block_int.h"
29 #include "block/qcow2.h"
30 #include "trace.h"
31 
32 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
33                         bool exact_size)
34 {
35     BDRVQcowState *s = bs->opaque;
36     int new_l1_size2, ret, i;
37     uint64_t *new_l1_table;
38     int64_t new_l1_table_offset, new_l1_size;
39     uint8_t data[12];
40 
41     if (min_size <= s->l1_size)
42         return 0;
43 
44     if (exact_size) {
45         new_l1_size = min_size;
46     } else {
47         /* Bump size up to reduce the number of times we have to grow */
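        /* Growing by roughly 1.5x per step keeps the number of reallocations
         * logarithmic in the final size; e.g. starting from an L1 size of 1,
         * the sequence is 1, 2, 3, 5, 8, 12, 18, ... until min_size is
         * reached. */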
48         new_l1_size = s->l1_size;
49         if (new_l1_size == 0) {
50             new_l1_size = 1;
51         }
52         while (min_size > new_l1_size) {
53             new_l1_size = (new_l1_size * 3 + 1) / 2;
54         }
55     }
56 
57     if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
58         return -EFBIG;
59     }
60 
61 #ifdef DEBUG_ALLOC2
62     fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
63             s->l1_size, new_l1_size);
64 #endif
65 
66     new_l1_size2 = sizeof(uint64_t) * new_l1_size;
67     new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
68     memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
69 
70     /* write new table (align to cluster) */
71     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
72     new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
73     if (new_l1_table_offset < 0) {
74         g_free(new_l1_table);
75         return new_l1_table_offset;
76     }
77 
78     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
79     if (ret < 0) {
80         goto fail;
81     }
82 
83     /* the L1 position has not yet been updated, so these clusters must
84      * indeed be completely free */
85     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
86                                         new_l1_table_offset, new_l1_size2);
87     if (ret < 0) {
88         goto fail;
89     }
90 
91     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
92     for (i = 0; i < s->l1_size; i++)
93         new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
94     ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
95     if (ret < 0)
96         goto fail;
97     for (i = 0; i < s->l1_size; i++)
98         new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
99 
100     /* set new table */
101     BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
102     cpu_to_be32w((uint32_t*)data, new_l1_size);
103     cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
104     ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
105     if (ret < 0) {
106         goto fail;
107     }
108     g_free(s->l1_table);
109     qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t),
110                         QCOW2_DISCARD_OTHER);
111     s->l1_table_offset = new_l1_table_offset;
112     s->l1_table = new_l1_table;
113     s->l1_size = new_l1_size;
114     return 0;
115  fail:
116     g_free(new_l1_table);
117     qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
118                         QCOW2_DISCARD_OTHER);
119     return ret;
120 }
121 
122 /*
123  * l2_load
124  *
125  * Loads an L2 table into memory. If the table is in the cache, the cache
126  * is used; otherwise the L2 table is loaded from the image file.
127  *
128  * Returns 0 on success (with *l2_table pointing to the table) or a negative
129  * errno value if reading from the image file failed.
130  */
131 
132 static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
133     uint64_t **l2_table)
134 {
135     BDRVQcowState *s = bs->opaque;
136     int ret;
137 
138     ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);
139 
140     return ret;
141 }
142 
143 /*
144  * Writes one sector of the L1 table to the disk (can't update single entries
145  * and we really don't want bdrv_pwrite to perform a read-modify-write)
146  */
147 #define L1_ENTRIES_PER_SECTOR (512 / 8)
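/* Example: with 8-byte entries and 512-byte sectors, updating l1_index 70
 * rewrites the whole sector covering entries 64..127 (l1_start_index =
 * 70 & ~63 = 64). */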
148 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
149 {
150     BDRVQcowState *s = bs->opaque;
151     uint64_t buf[L1_ENTRIES_PER_SECTOR];
152     int l1_start_index;
153     int i, ret;
154 
155     l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
156     for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
157         buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
158     }
159 
160     ret = qcow2_pre_write_overlap_check(bs,
161             QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L1,
162             s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
163     if (ret < 0) {
164         return ret;
165     }
166 
167     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
168     ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
169         buf, sizeof(buf));
170     if (ret < 0) {
171         return ret;
172     }
173 
174     return 0;
175 }
176 
177 /*
178  * l2_allocate
179  *
180  * Allocate a new l2 entry in the file. If l1_index points to an already
181  * used entry in the L1 table (i.e. we are doing a copy on write for the L2
182  * table), copy the contents of the old L2 table into the newly allocated one.
183  * Otherwise the new table is initialized with zeros.
184  *
185  */
186 
187 static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
188 {
189     BDRVQcowState *s = bs->opaque;
190     uint64_t old_l2_offset;
191     uint64_t *l2_table = NULL;
192     int64_t l2_offset;
193     int ret;
194 
195     old_l2_offset = s->l1_table[l1_index];
196 
197     trace_qcow2_l2_allocate(bs, l1_index);
198 
199     /* allocate a new l2 entry */
200 
201     l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
202     if (l2_offset < 0) {
203         return l2_offset;
204     }
205 
206     ret = qcow2_cache_flush(bs, s->refcount_block_cache);
207     if (ret < 0) {
208         goto fail;
209     }
210 
211     /* allocate a new entry in the l2 cache */
212 
213     trace_qcow2_l2_allocate_get_empty(bs, l1_index);
214     ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
215     if (ret < 0) {
216         goto fail;
217     }
218 
219     l2_table = *table;
220 
221     if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
222         /* if there was no old l2 table, clear the new table */
223         memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
224     } else {
225         uint64_t* old_table;
226 
227         /* if there was an old l2 table, read it from the disk */
228         BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
229         ret = qcow2_cache_get(bs, s->l2_table_cache,
230             old_l2_offset & L1E_OFFSET_MASK,
231             (void**) &old_table);
232         if (ret < 0) {
233             goto fail;
234         }
235 
236         memcpy(l2_table, old_table, s->cluster_size);
237 
238         ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
239         if (ret < 0) {
240             goto fail;
241         }
242     }
243 
244     /* write the l2 table to the file */
245     BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
246 
247     trace_qcow2_l2_allocate_write_l2(bs, l1_index);
248     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
249     ret = qcow2_cache_flush(bs, s->l2_table_cache);
250     if (ret < 0) {
251         goto fail;
252     }
253 
254     /* update the L1 entry */
255     trace_qcow2_l2_allocate_write_l1(bs, l1_index);
256     s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
257     ret = qcow2_write_l1_entry(bs, l1_index);
258     if (ret < 0) {
259         goto fail;
260     }
261 
262     *table = l2_table;
263     trace_qcow2_l2_allocate_done(bs, l1_index, 0);
264     return 0;
265 
266 fail:
267     trace_qcow2_l2_allocate_done(bs, l1_index, ret);
268     if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
269     s->l1_table[l1_index] = old_l2_offset;
270     return ret;
271 }
272 
273 /*
274  * Checks how many clusters in a given L2 table are contiguous in the image
275  * file. As soon as one of the flags in the bitmask stop_flags changes compared
276  * to the first cluster, the search is stopped and the cluster is not counted
277  * as contiguous. (This allows the caller, for example, to stop at the first
278  * compressed cluster, which may require different handling.)
279  */
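/* For example, with stop_flags = QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO (as
 * the read path below uses), a run of normal clusters ends at the first
 * compressed or zero cluster, and also whenever the host offsets stop
 * increasing by exactly one cluster per L2 entry. */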
280 static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
281         uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
282 {
283     int i;
284     uint64_t mask = stop_flags | L2E_OFFSET_MASK;
285     uint64_t offset = be64_to_cpu(l2_table[0]) & mask;
286 
287     if (!offset)
288         return 0;
289 
290     for (i = start; i < start + nb_clusters; i++) {
291         uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
292         if (offset + (uint64_t) i * cluster_size != l2_entry) {
293             break;
294         }
295     }
296 
297     return (i - start);
298 }
299 
300 static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
301 {
302     int i;
303 
304     for (i = 0; i < nb_clusters; i++) {
305         int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));
306 
307         if (type != QCOW2_CLUSTER_UNALLOCATED) {
308             break;
309         }
310     }
311 
312     return i;
313 }
314 
315 /* The crypt function is compatible with the linux cryptoloop
316    algorithm for < 4 GB images. NOTE: out_buf == in_buf is
317    supported */
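/* Each 512-byte sector is encrypted independently with AES-CBC; the 16-byte
 * IV is the sector number stored little-endian in the first 8 bytes, with the
 * remaining 8 bytes zero. Because out_buf == in_buf is allowed, buffers can
 * be encrypted in place. */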
318 void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
319                            uint8_t *out_buf, const uint8_t *in_buf,
320                            int nb_sectors, int enc,
321                            const AES_KEY *key)
322 {
323     union {
324         uint64_t ll[2];
325         uint8_t b[16];
326     } ivec;
327     int i;
328 
329     for (i = 0; i < nb_sectors; i++) {
330         ivec.ll[0] = cpu_to_le64(sector_num);
331         ivec.ll[1] = 0;
332         AES_cbc_encrypt(in_buf, out_buf, 512, key,
333                         ivec.b, enc);
334         sector_num++;
335         in_buf += 512;
336         out_buf += 512;
337     }
338 }
339 
340 static int coroutine_fn copy_sectors(BlockDriverState *bs,
341                                      uint64_t start_sect,
342                                      uint64_t cluster_offset,
343                                      int n_start, int n_end)
344 {
345     BDRVQcowState *s = bs->opaque;
346     QEMUIOVector qiov;
347     struct iovec iov;
348     int n, ret;
349 
350     /*
351      * If this is the last cluster and it is only partially used, we must only
352      * copy until the end of the image, or bdrv_check_request will fail for the
353      * bdrv_read/write calls below.
354      */
355     if (start_sect + n_end > bs->total_sectors) {
356         n_end = bs->total_sectors - start_sect;
357     }
358 
359     n = n_end - n_start;
360     if (n <= 0) {
361         return 0;
362     }
363 
364     iov.iov_len = n * BDRV_SECTOR_SIZE;
365     iov.iov_base = qemu_blockalign(bs, iov.iov_len);
366 
367     qemu_iovec_init_external(&qiov, &iov, 1);
368 
369     BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
370 
371     /* Call .bdrv_co_readv() directly instead of using the public block-layer
372      * interface.  This avoids double I/O throttling and request tracking,
373      * which can lead to deadlock when block layer copy-on-read is enabled.
374      */
375     ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
376     if (ret < 0) {
377         goto out;
378     }
379 
380     if (s->crypt_method) {
381         qcow2_encrypt_sectors(s, start_sect + n_start,
382                         iov.iov_base, iov.iov_base, n, 1,
383                         &s->aes_encrypt_key);
384     }
385 
386     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
387             cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
388     if (ret < 0) {
389         goto out;
390     }
391 
392     BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
393     ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
394     if (ret < 0) {
395         goto out;
396     }
397 
398     ret = 0;
399 out:
400     qemu_vfree(iov.iov_base);
401     return ret;
402 }
403 
404 
405 /*
406  * get_cluster_offset
407  *
408  * For a given offset of the disk image, find the cluster offset in
409  * qcow2 file. The offset is stored in *cluster_offset.
410  *
411  * on entry, *num is the number of contiguous sectors we'd like to
412  * access following offset.
413  *
414  * on exit, *num is the number of contiguous sectors we can read.
415  *
416  * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
417  * cases.
418  */
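/*
 * Rough usage sketch from a read path (hypothetical variable names, error
 * handling omitted):
 *
 *     int nb = nb_sectors;
 *     uint64_t host_offset;
 *     ret = qcow2_get_cluster_offset(bs, sector_num << 9, &nb, &host_offset);
 *     switch (ret) {
 *     case QCOW2_CLUSTER_NORMAL:      // read nb sectors from bs->file
 *     case QCOW2_CLUSTER_UNALLOCATED: // read backing file or zero-fill
 *     case QCOW2_CLUSTER_ZERO:        // zero-fill the guest buffer
 *     case QCOW2_CLUSTER_COMPRESSED:  // qcow2_decompress_cluster() first
 *     }
 */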
419 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
420     int *num, uint64_t *cluster_offset)
421 {
422     BDRVQcowState *s = bs->opaque;
423     unsigned int l2_index;
424     uint64_t l1_index, l2_offset, *l2_table;
425     int l1_bits, c;
426     unsigned int index_in_cluster, nb_clusters;
427     uint64_t nb_available, nb_needed;
428     int ret;
429 
430     index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
431     nb_needed = *num + index_in_cluster;
432 
433     l1_bits = s->l2_bits + s->cluster_bits;
434 
435     /* compute how many bytes there are between the offset and
436      * the end of the l1 entry
437      */
438 
439     nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));
440 
441     /* compute the number of available sectors */
442 
443     nb_available = (nb_available >> 9) + index_in_cluster;
444 
445     if (nb_needed > nb_available) {
446         nb_needed = nb_available;
447     }
448 
449     *cluster_offset = 0;
450 
451     /* seek the L2 offset in the L1 table */
452 
453     l1_index = offset >> l1_bits;
454     if (l1_index >= s->l1_size) {
455         ret = QCOW2_CLUSTER_UNALLOCATED;
456         goto out;
457     }
458 
459     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
460     if (!l2_offset) {
461         ret = QCOW2_CLUSTER_UNALLOCATED;
462         goto out;
463     }
464 
465     /* load the l2 table in memory */
466 
467     ret = l2_load(bs, l2_offset, &l2_table);
468     if (ret < 0) {
469         return ret;
470     }
471 
472     /* find the cluster offset for the given disk offset */
473 
474     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
475     *cluster_offset = be64_to_cpu(l2_table[l2_index]);
476     nb_clusters = size_to_clusters(s, nb_needed << 9);
477 
478     ret = qcow2_get_cluster_type(*cluster_offset);
479     switch (ret) {
480     case QCOW2_CLUSTER_COMPRESSED:
481         /* Compressed clusters can only be processed one by one */
482         c = 1;
483         *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
484         break;
485     case QCOW2_CLUSTER_ZERO:
486         if (s->qcow_version < 3) {
            qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
487             return -EIO;
488         }
489         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
490                 &l2_table[l2_index], 0,
491                 QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
492         *cluster_offset = 0;
493         break;
494     case QCOW2_CLUSTER_UNALLOCATED:
495         /* how many empty clusters? */
496         c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
497         *cluster_offset = 0;
498         break;
499     case QCOW2_CLUSTER_NORMAL:
500         /* how many allocated clusters? */
501         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
502                 &l2_table[l2_index], 0,
503                 QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
504         *cluster_offset &= L2E_OFFSET_MASK;
505         break;
506     default:
507         abort();
508     }
509 
510     qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
511 
512     nb_available = (c * s->cluster_sectors);
513 
514 out:
515     if (nb_available > nb_needed)
516         nb_available = nb_needed;
517 
518     *num = nb_available - index_in_cluster;
519 
520     return ret;
521 }
522 
523 /*
524  * get_cluster_table
525  *
526  * for a given disk offset, load (and allocate if needed)
527  * the l2 table.
528  *
529  * the l2 table offset in the qcow2 file and the cluster index
530  * in the l2 table are given to the caller.
531  *
532  * Returns 0 on success, -errno in failure case
533  */
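/* Note: an L1 entry with QCOW_OFLAG_COPIED set guarantees that the L2 table it
 * points to has a refcount of exactly 1 and may be updated in place; without
 * the flag the table may be shared with a snapshot, so l2_allocate() below
 * copies it first. */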
534 static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
535                              uint64_t **new_l2_table,
536                              int *new_l2_index)
537 {
538     BDRVQcowState *s = bs->opaque;
539     unsigned int l2_index;
540     uint64_t l1_index, l2_offset;
541     uint64_t *l2_table = NULL;
542     int ret;
543 
544     /* seek the L2 offset in the L1 table */
545 
546     l1_index = offset >> (s->l2_bits + s->cluster_bits);
547     if (l1_index >= s->l1_size) {
548         ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
549         if (ret < 0) {
550             return ret;
551         }
552     }
553 
554     assert(l1_index < s->l1_size);
555     l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
556 
557     /* seek the l2 table of the given l2 offset */
558 
559     if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
560         /* load the l2 table in memory */
561         ret = l2_load(bs, l2_offset, &l2_table);
562         if (ret < 0) {
563             return ret;
564         }
565     } else {
566         /* First allocate a new L2 table (and do COW if needed) */
567         ret = l2_allocate(bs, l1_index, &l2_table);
568         if (ret < 0) {
569             return ret;
570         }
571 
572         /* Then decrease the refcount of the old table */
573         if (l2_offset) {
574             qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
575                                 QCOW2_DISCARD_OTHER);
576         }
577     }
578 
579     /* find the cluster offset for the given disk offset */
580 
581     l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
582 
583     *new_l2_table = l2_table;
584     *new_l2_index = l2_index;
585 
586     return 0;
587 }
588 
589 /*
590  * alloc_compressed_cluster_offset
591  *
592  * For a given offset of the disk image, return cluster offset in
593  * qcow2 file.
594  *
595  * If the offset is not found, allocate a new compressed cluster.
596  *
597  * Returns the cluster offset if successful.
598  * Returns 0 otherwise.
599  *
600  */
601 
602 uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
603                                                uint64_t offset,
604                                                int compressed_size)
605 {
606     BDRVQcowState *s = bs->opaque;
607     int l2_index, ret;
608     uint64_t *l2_table;
609     int64_t cluster_offset;
610     int nb_csectors;
611 
612     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
613     if (ret < 0) {
614         return 0;
615     }
616 
617     /* Compression can't overwrite anything. Fail if the cluster was already
618      * allocated. */
619     cluster_offset = be64_to_cpu(l2_table[l2_index]);
620     if (cluster_offset & L2E_OFFSET_MASK) {
621         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
622         return 0;
623     }
624 
625     cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
626     if (cluster_offset < 0) {
627         qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
628         return 0;
629     }
630 
631     nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
632                   (cluster_offset >> 9);
633 
634     cluster_offset |= QCOW_OFLAG_COMPRESSED |
635                       ((uint64_t)nb_csectors << s->csize_shift);
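    /* The resulting L2 entry stores the host byte offset in the low
     * csize_shift bits and, above them, the number of 512-byte sectors the
     * compressed data spans beyond the first one (qcow2_decompress_cluster()
     * adds the implicit +1 back when reading the entry). */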
636 
637     /* update L2 table */
638 
639     /* compressed clusters never have the copied flag */
640 
641     BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
642     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
643     l2_table[l2_index] = cpu_to_be64(cluster_offset);
644     ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
645     if (ret < 0) {
646         return 0;
647     }
648 
649     return cluster_offset;
650 }
651 
652 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
653 {
654     BDRVQcowState *s = bs->opaque;
655     int ret;
656 
657     if (r->nb_sectors == 0) {
658         return 0;
659     }
660 
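    /* copy_sectors() issues real I/O and may yield; drop the qcow2 lock so
     * other requests can make progress while the COW data is copied. */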
661     qemu_co_mutex_unlock(&s->lock);
662     ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
663                        r->offset / BDRV_SECTOR_SIZE,
664                        r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
665     qemu_co_mutex_lock(&s->lock);
666 
667     if (ret < 0) {
668         return ret;
669     }
670 
671     /*
672      * Before we update the L2 table to actually point to the new cluster, we
673      * need to be sure that the refcounts have been increased and COW was
674      * handled.
675      */
676     qcow2_cache_depends_on_flush(s->l2_table_cache);
677 
678     return 0;
679 }
680 
681 int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
682 {
683     BDRVQcowState *s = bs->opaque;
684     int i, j = 0, l2_index, ret;
685     uint64_t *old_cluster, *l2_table;
686     uint64_t cluster_offset = m->alloc_offset;
687 
688     trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
689     assert(m->nb_clusters > 0);
690 
691     old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));
692 
693     /* copy content of unmodified sectors */
694     ret = perform_cow(bs, m, &m->cow_start);
695     if (ret < 0) {
696         goto err;
697     }
698 
699     ret = perform_cow(bs, m, &m->cow_end);
700     if (ret < 0) {
701         goto err;
702     }
703 
704     /* Update L2 table. */
705     if (s->use_lazy_refcounts) {
706         qcow2_mark_dirty(bs);
707     }
708     if (qcow2_need_accurate_refcounts(s)) {
709         qcow2_cache_set_dependency(bs, s->l2_table_cache,
710                                    s->refcount_block_cache);
711     }
712 
713     ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
714     if (ret < 0) {
715         goto err;
716     }
717     qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
718 
719     for (i = 0; i < m->nb_clusters; i++) {
720         /* If two concurrent writes happen to the same unallocated cluster,
721          * each write allocates a separate cluster and writes its data
722          * concurrently. The first one to complete updates the L2 table with
723          * a pointer to its cluster; the second one then has to do RMW (done
724          * above by copy_sectors()), update the L2 table with its own cluster
725          * pointer, and free the old cluster. This is what this loop does. */
726         if (l2_table[l2_index + i] != 0)
727             old_cluster[j++] = l2_table[l2_index + i];
728 
729         l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
730                     (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
731     }
732 
734     ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
735     if (ret < 0) {
736         goto err;
737     }
738 
739     /*
740      * If this was a COW, we need to decrease the refcount of the old cluster.
741      * Also flush bs->file to get the right order for L2 and refcount update.
742      *
743      * Don't discard clusters that reach a refcount of 0 (e.g. compressed
744      * clusters), the next write will reuse them anyway.
745      */
746     if (j != 0) {
747         for (i = 0; i < j; i++) {
748             qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
749                                     QCOW2_DISCARD_NEVER);
750         }
751     }
752 
753     ret = 0;
754 err:
755     g_free(old_cluster);
756     return ret;
757 }
758 
759 /*
760  * Returns the number of contiguous clusters that can be used for an allocating
761  * write, but require COW to be performed (this includes as yet unallocated
762  * space, which must be copied from the backing file)
763  */
764 static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
765     uint64_t *l2_table, int l2_index)
766 {
767     int i;
768 
769     for (i = 0; i < nb_clusters; i++) {
770         uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
771         int cluster_type = qcow2_get_cluster_type(l2_entry);
772 
773         switch (cluster_type) {
774         case QCOW2_CLUSTER_NORMAL:
775             if (l2_entry & QCOW_OFLAG_COPIED) {
776                 goto out;
777             }
778             break;
779         case QCOW2_CLUSTER_UNALLOCATED:
780         case QCOW2_CLUSTER_COMPRESSED:
781         case QCOW2_CLUSTER_ZERO:
782             break;
783         default:
784             abort();
785         }
786     }
787 
788 out:
789     assert(i <= nb_clusters);
790     return i;
791 }
792 
793 /*
794  * Check if there already is an AIO write request in flight which allocates
795  * the same cluster. In this case we need to wait until the previous
796  * request has completed and updated the L2 table accordingly.
797  *
798  * Returns:
799  *   0       if there was no dependency. *cur_bytes indicates the number of
800  *           bytes from guest_offset that can be read before the next
801  *           dependency must be processed (or the request is complete)
802  *
803  *   -EAGAIN if we had to wait for another request, previously gathered
804  *           information on cluster allocation may be invalid now. The caller
805  *           must start over anyway, so consider *cur_bytes undefined.
806  */
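/* Example: if a running allocation covers guest bytes [64k, 128k) and a new
 * request asks for [32k, 96k), the new request is shortened to [32k, 64k);
 * the caller handles the overlapping tail in a later iteration. */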
807 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
808     uint64_t *cur_bytes, QCowL2Meta **m)
809 {
810     BDRVQcowState *s = bs->opaque;
811     QCowL2Meta *old_alloc;
812     uint64_t bytes = *cur_bytes;
813 
814     QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
815 
816         uint64_t start = guest_offset;
817         uint64_t end = start + bytes;
818         uint64_t old_start = l2meta_cow_start(old_alloc);
819         uint64_t old_end = l2meta_cow_end(old_alloc);
820 
821         if (end <= old_start || start >= old_end) {
822             /* No intersection */
823         } else {
824             if (start < old_start) {
825                 /* Stop at the start of a running allocation */
826                 bytes = old_start - start;
827             } else {
828                 bytes = 0;
829             }
830 
831             /* Stop if an l2meta already exists. After yielding, it wouldn't
832              * be valid any more, so we'd have to clean up the old L2Metas
833              * and deal with requests depending on them before starting to
834              * gather new ones. Not worth the trouble. */
835             if (bytes == 0 && *m) {
836                 *cur_bytes = 0;
837                 return 0;
838             }
839 
840             if (bytes == 0) {
841                 /* Wait for the dependency to complete. We need to recheck
842                  * the free/allocated clusters when we continue. */
843                 qemu_co_mutex_unlock(&s->lock);
844                 qemu_co_queue_wait(&old_alloc->dependent_requests);
845                 qemu_co_mutex_lock(&s->lock);
846                 return -EAGAIN;
847             }
848         }
849     }
850 
851     /* Make sure that existing clusters and new allocations are only used up to
852      * the next dependency if we shortened the request above */
853     *cur_bytes = bytes;
854 
855     return 0;
856 }
857 
858 /*
859  * Checks how many clusters starting at guest_offset (up to *bytes) are
860  * already allocated and don't require a copy on write. If
861  * *host_offset is not zero, only physically contiguous clusters beginning at
862  * this host offset are counted.
863  *
864  * Note that guest_offset may not be cluster aligned. In this case, the
865  * returned *host_offset points to the exact byte referenced by guest_offset
866  * and therefore isn't cluster aligned either.
867  *
868  * Returns:
869  *   0:     if no allocated clusters are available at the given offset.
870  *          *bytes is normally unchanged. It is set to 0 if the cluster
871  *          is allocated and doesn't need COW, but doesn't have the right
872  *          physical offset.
873  *
874  *   1:     if allocated clusters that don't require a COW are available at
875  *          the requested offset. *bytes may have decreased and describes
876  *          the length of the area that can be written to.
877  *
878  *  -errno: in error cases
879  */
880 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
881     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
882 {
883     BDRVQcowState *s = bs->opaque;
884     int l2_index;
885     uint64_t cluster_offset;
886     uint64_t *l2_table;
887     unsigned int nb_clusters;
888     unsigned int keep_clusters;
889     int ret, pret;
890 
891     trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
892                               *bytes);
893 
894     assert(*host_offset == 0 ||    offset_into_cluster(s, guest_offset)
895                                 == offset_into_cluster(s, *host_offset));
896 
897     /*
898      * Calculate the number of clusters to look for. We stop at L2 table
899      * boundaries to keep things simple.
900      */
901     nb_clusters =
902         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
903 
904     l2_index = offset_to_l2_index(s, guest_offset);
905     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
906 
907     /* Find L2 entry for the first involved cluster */
908     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
909     if (ret < 0) {
910         return ret;
911     }
912 
913     cluster_offset = be64_to_cpu(l2_table[l2_index]);
914 
915     /* Check how many clusters are already allocated and don't need COW */
916     if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
917         && (cluster_offset & QCOW_OFLAG_COPIED))
918     {
919         /* If a specific host_offset is required, check it */
920         bool offset_matches =
921             (cluster_offset & L2E_OFFSET_MASK) == *host_offset;
922 
923         if (*host_offset != 0 && !offset_matches) {
924             *bytes = 0;
925             ret = 0;
926             goto out;
927         }
928 
929         /* We keep all QCOW_OFLAG_COPIED clusters */
930         keep_clusters =
931             count_contiguous_clusters(nb_clusters, s->cluster_size,
932                                       &l2_table[l2_index], 0,
933                                       QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
934         assert(keep_clusters <= nb_clusters);
935 
936         *bytes = MIN(*bytes,
937                  keep_clusters * s->cluster_size
938                  - offset_into_cluster(s, guest_offset));
939 
940         ret = 1;
941     } else {
942         ret = 0;
943     }
944 
945     /* Cleanup */
946 out:
947     pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
948     if (pret < 0) {
949         return pret;
950     }
951 
952     /* Only return a host offset if we actually made progress. Otherwise we
953      * would make requirements for handle_alloc() that it can't fulfill */
954     if (ret) {
955         *host_offset = (cluster_offset & L2E_OFFSET_MASK)
956                      + offset_into_cluster(s, guest_offset);
957     }
958 
959     return ret;
960 }
961 
962 /*
963  * Allocates new clusters for the given guest_offset.
964  *
965  * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
966  * contain the number of clusters that have been allocated and are contiguous
967  * in the image file.
968  *
969  * If *host_offset is non-zero, it specifies the offset in the image file at
970  * which the new clusters must start. *nb_clusters can be 0 on return in this
971  * case if the cluster at host_offset is already in use. If *host_offset is
972  * zero, the clusters can be allocated anywhere in the image file.
973  *
974  * *host_offset is updated to contain the offset into the image file at which
975  * the first allocated cluster starts.
976  *
977  * Return 0 on success and -errno in error cases. -EAGAIN means that the
978  * function has been waiting for another request and the allocation must be
979  * restarted, but the whole request should not be failed.
980  */
981 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
982     uint64_t *host_offset, unsigned int *nb_clusters)
983 {
984     BDRVQcowState *s = bs->opaque;
985 
986     trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
987                                          *host_offset, *nb_clusters);
988 
989     /* Allocate new clusters */
990     trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
991     if (*host_offset == 0) {
992         int64_t cluster_offset =
993             qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
994         if (cluster_offset < 0) {
995             return cluster_offset;
996         }
997         *host_offset = cluster_offset;
998         return 0;
999     } else {
1000         int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
1001         if (ret < 0) {
1002             return ret;
1003         }
1004         *nb_clusters = ret;
1005         return 0;
1006     }
1007 }
1008 
1009 /*
1010  * Allocates new clusters for an area that either is yet unallocated or needs a
1011  * copy on write. If *host_offset is non-zero, clusters are only allocated if
1012  * the new allocation can match the specified host offset.
1013  *
1014  * Note that guest_offset may not be cluster aligned. In this case, the
1015  * returned *host_offset points to the exact byte referenced by guest_offset
1016  * and therefore isn't cluster aligned either.
1017  *
1018  * Returns:
1019  *   0:     if no clusters could be allocated. *bytes is set to 0,
1020  *          *host_offset is left unchanged.
1021  *
1022  *   1:     if new clusters were allocated. *bytes may be decreased if the
1023  *          new allocation doesn't cover all of the requested area.
1024  *          *host_offset is updated to contain the host offset of the first
1025  *          newly allocated cluster.
1026  *
1027  *  -errno: in error cases
1028  */
1029 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
1030     uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
1031 {
1032     BDRVQcowState *s = bs->opaque;
1033     int l2_index;
1034     uint64_t *l2_table;
1035     uint64_t entry;
1036     unsigned int nb_clusters;
1037     int ret;
1038 
1039     uint64_t alloc_cluster_offset;
1040 
1041     trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
1042                              *bytes);
1043     assert(*bytes > 0);
1044 
1045     /*
1046      * Calculate the number of clusters to look for. We stop at L2 table
1047      * boundaries to keep things simple.
1048      */
1049     nb_clusters =
1050         size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);
1051 
1052     l2_index = offset_to_l2_index(s, guest_offset);
1053     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1054 
1055     /* Find L2 entry for the first involved cluster */
1056     ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
1057     if (ret < 0) {
1058         return ret;
1059     }
1060 
1061     entry = be64_to_cpu(l2_table[l2_index]);
1062 
1063     /* For the moment, overwrite compressed clusters one by one */
1064     if (entry & QCOW_OFLAG_COMPRESSED) {
1065         nb_clusters = 1;
1066     } else {
1067         nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
1068     }
1069 
1070     /* This function is only called when there were no non-COW clusters, so if
1071      * we can't find any unallocated or COW clusters either, something is
1072      * wrong with our code. */
1073     assert(nb_clusters > 0);
1074 
1075     ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
1076     if (ret < 0) {
1077         return ret;
1078     }
1079 
1080     /* Allocate, if necessary at a given offset in the image file */
1081     alloc_cluster_offset = start_of_cluster(s, *host_offset);
1082     ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
1083                                   &nb_clusters);
1084     if (ret < 0) {
1085         goto fail;
1086     }
1087 
1088     /* Can't extend contiguous allocation */
1089     if (nb_clusters == 0) {
1090         *bytes = 0;
1091         return 0;
1092     }
1093 
1094     /*
1095      * Save info needed for meta data update.
1096      *
1097      * requested_sectors: Number of sectors from the start of the first
1098      * newly allocated cluster to the end of the (possibly shortened
1099      * before) write request.
1100      *
1101      * avail_sectors: Number of sectors from the start of the first
1102      * newly allocated to the end of the last newly allocated cluster.
1103      *
1104      * nb_sectors: The number of sectors from the start of the first
1105      * newly allocated cluster to the end of the area that the write
1106      * request actually writes to (excluding COW at the end)
1107      */
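    /* Example with 64k clusters: a 4k write at guest offset 6k into a newly
     * allocated cluster gives alloc_n_start = 12, requested_sectors = 20 and
     * avail_sectors = 128, so cow_start covers sectors 0..11 and cow_end
     * covers sectors 20..127. */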
1108     int requested_sectors =
1109         (*bytes + offset_into_cluster(s, guest_offset))
1110         >> BDRV_SECTOR_BITS;
1111     int avail_sectors = nb_clusters
1112                         << (s->cluster_bits - BDRV_SECTOR_BITS);
1113     int alloc_n_start = offset_into_cluster(s, guest_offset)
1114                         >> BDRV_SECTOR_BITS;
1115     int nb_sectors = MIN(requested_sectors, avail_sectors);
1116     QCowL2Meta *old_m = *m;
1117 
1118     *m = g_malloc0(sizeof(**m));
1119 
1120     **m = (QCowL2Meta) {
1121         .next           = old_m,
1122 
1123         .alloc_offset   = alloc_cluster_offset,
1124         .offset         = start_of_cluster(s, guest_offset),
1125         .nb_clusters    = nb_clusters,
1126         .nb_available   = nb_sectors,
1127 
1128         .cow_start = {
1129             .offset     = 0,
1130             .nb_sectors = alloc_n_start,
1131         },
1132         .cow_end = {
1133             .offset     = nb_sectors * BDRV_SECTOR_SIZE,
1134             .nb_sectors = avail_sectors - nb_sectors,
1135         },
1136     };
1137     qemu_co_queue_init(&(*m)->dependent_requests);
1138     QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
1139 
1140     *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
1141     *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
1142                          - offset_into_cluster(s, guest_offset));
1143     assert(*bytes != 0);
1144 
1145     return 1;
1146 
1147 fail:
1148     if (*m && (*m)->nb_clusters > 0) {
1149         QLIST_REMOVE(*m, next_in_flight);
1150     }
1151     return ret;
1152 }
1153 
1154 /*
1155  * alloc_cluster_offset
1156  *
1157  * For a given offset on the virtual disk, find the cluster offset in qcow2
1158  * file. If the offset is not found, allocate a new cluster.
1159  *
1160  * If the cluster was already allocated, m->nb_clusters is set to 0 and
1161  * other fields in m are meaningless.
1162  *
1163  * If the cluster is newly allocated, m->nb_clusters is set to the number of
1164  * contiguous clusters that have been allocated. In this case, the other
1165  * fields of m are valid and contain information about the first allocated
1166  * cluster.
1167  *
1168  * If the request conflicts with another write request in flight, the coroutine
1169  * is queued and will be reentered when the dependency has completed.
1170  *
1171  * Return 0 on success and -errno in error cases
1172  */
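/*
 * Rough usage sketch from a write path (hypothetical names; error handling
 * and the COW regions in *m omitted):
 *
 *     ret = qcow2_alloc_cluster_offset(bs, offset, n_start, n_end,
 *                                      &cur_nr_sectors, &host_offset, &l2meta);
 *     // ... write guest data to bs->file at host_offset ...
 *     ret = qcow2_alloc_cluster_link_l2(bs, l2meta);  // commit the mapping
 */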
1173 int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
1174     int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
1175 {
1176     BDRVQcowState *s = bs->opaque;
1177     uint64_t start, remaining;
1178     uint64_t cluster_offset;
1179     uint64_t cur_bytes;
1180     int ret;
1181 
1182     trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
1183                                       n_start, n_end);
1184 
1185     assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
1186     offset = start_of_cluster(s, offset);
1187 
1188 again:
1189     start = offset + (n_start << BDRV_SECTOR_BITS);
1190     remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
1191     cluster_offset = 0;
1192     *host_offset = 0;
1193     cur_bytes = 0;
1194     *m = NULL;
1195 
1196     while (true) {
1197 
1198         if (!*host_offset) {
1199             *host_offset = start_of_cluster(s, cluster_offset);
1200         }
1201 
1202         assert(remaining >= cur_bytes);
1203 
1204         start           += cur_bytes;
1205         remaining       -= cur_bytes;
1206         cluster_offset  += cur_bytes;
1207 
1208         if (remaining == 0) {
1209             break;
1210         }
1211 
1212         cur_bytes = remaining;
1213 
1214         /*
1215          * Now start gathering as many contiguous clusters as possible:
1216          *
1217          * 1. Check for overlaps with in-flight allocations
1218          *
1219          *      a) Overlap not in the first cluster -> shorten this request and
1220          *         let the caller handle the rest in its next loop iteration.
1221          *
1222          *      b) Real overlaps of two requests. Yield and restart the search
1223          *         for contiguous clusters (the situation could have changed
1224          *         while we were sleeping)
1225          *
1226          *      c) TODO: Request starts in the same cluster as the in-flight
1227          *         allocation ends. Shorten the COW of the in-flight allocation,
1228          *         set cluster_offset to write to the same cluster and set up
1229          *         the right synchronisation between the in-flight request and
1230          *         the new one.
1231          */
1232         ret = handle_dependencies(bs, start, &cur_bytes, m);
1233         if (ret == -EAGAIN) {
1234             /* Currently handle_dependencies() doesn't yield if we already had
1235              * an allocation. If it did, we would have to clean up the L2Meta
1236              * structs before starting over. */
1237             assert(*m == NULL);
1238             goto again;
1239         } else if (ret < 0) {
1240             return ret;
1241         } else if (cur_bytes == 0) {
1242             break;
1243         } else {
1244             /* handle_dependencies() may have decreased cur_bytes (shortened
1245              * the allocations below) so that the next dependency is processed
1246              * correctly during the next loop iteration. */
1247         }
1248 
1249         /*
1250          * 2. Count contiguous COPIED clusters.
1251          */
1252         ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
1253         if (ret < 0) {
1254             return ret;
1255         } else if (ret) {
1256             continue;
1257         } else if (cur_bytes == 0) {
1258             break;
1259         }
1260 
1261         /*
1262          * 3. If the request still hasn't completed, allocate new clusters,
1263          *    considering any cluster_offset of steps 1c or 2.
1264          */
1265         ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
1266         if (ret < 0) {
1267             return ret;
1268         } else if (ret) {
1269             continue;
1270         } else {
1271             assert(cur_bytes == 0);
1272             break;
1273         }
1274     }
1275 
1276     *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
1277     assert(*num > 0);
1278     assert(*host_offset != 0);
1279 
1280     return 0;
1281 }
1282 
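/* Compressed clusters are stored as raw deflate streams without a zlib
 * header, hence the negative window-bits argument to inflateInit2() below.
 * Z_BUF_ERROR is tolerated because the input is simply cut off at the end of
 * the compressed cluster rather than cleanly terminated. */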
1283 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
1284                              const uint8_t *buf, int buf_size)
1285 {
1286     z_stream strm1, *strm = &strm1;
1287     int ret, out_len;
1288 
1289     memset(strm, 0, sizeof(*strm));
1290 
1291     strm->next_in = (uint8_t *)buf;
1292     strm->avail_in = buf_size;
1293     strm->next_out = out_buf;
1294     strm->avail_out = out_buf_size;
1295 
1296     ret = inflateInit2(strm, -12);
1297     if (ret != Z_OK)
1298         return -1;
1299     ret = inflate(strm, Z_FINISH);
1300     out_len = strm->next_out - out_buf;
1301     if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
1302         out_len != out_buf_size) {
1303         inflateEnd(strm);
1304         return -1;
1305     }
1306     inflateEnd(strm);
1307     return 0;
1308 }
1309 
1310 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
1311 {
1312     BDRVQcowState *s = bs->opaque;
1313     int ret, csize, nb_csectors, sector_offset;
1314     uint64_t coffset;
1315 
1316     coffset = cluster_offset & s->cluster_offset_mask;
1317     if (s->cluster_cache_offset != coffset) {
1318         nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
1319         sector_offset = coffset & 511;
1320         csize = nb_csectors * 512 - sector_offset;
1321         BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
1322         ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
1323         if (ret < 0) {
1324             return ret;
1325         }
1326         if (decompress_buffer(s->cluster_cache, s->cluster_size,
1327                               s->cluster_data + sector_offset, csize) < 0) {
1328             return -EIO;
1329         }
1330         s->cluster_cache_offset = coffset;
1331     }
1332     return 0;
1333 }
1334 
1335 /*
1336  * This discards as many clusters of nb_clusters as possible at once (i.e.
1337  * all clusters in the same L2 table) and returns the number of discarded
1338  * clusters.
1339  */
1340 static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
1341     unsigned int nb_clusters)
1342 {
1343     BDRVQcowState *s = bs->opaque;
1344     uint64_t *l2_table;
1345     int l2_index;
1346     int ret;
1347     int i;
1348 
1349     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1350     if (ret < 0) {
1351         return ret;
1352     }
1353 
1354     /* Limit nb_clusters to one L2 table */
1355     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1356 
1357     for (i = 0; i < nb_clusters; i++) {
1358         uint64_t old_offset;
1359 
1360         old_offset = be64_to_cpu(l2_table[l2_index + i]);
1361         if ((old_offset & L2E_OFFSET_MASK) == 0) {
1362             continue;
1363         }
1364 
1365         /* First remove L2 entries */
1366         qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
1367         l2_table[l2_index + i] = cpu_to_be64(0);
1368 
1369         /* Then decrease the refcount */
1370         qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
1371     }
1372 
1373     ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
1374     if (ret < 0) {
1375         return ret;
1376     }
1377 
1378     return nb_clusters;
1379 }
1380 
1381 int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
1382     int nb_sectors)
1383 {
1384     BDRVQcowState *s = bs->opaque;
1385     uint64_t end_offset;
1386     unsigned int nb_clusters;
1387     int ret;
1388 
1389     end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);
1390 
1391     /* Round start up and end down */
1392     offset = align_offset(offset, s->cluster_size);
1393     end_offset &= ~(s->cluster_size - 1);
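    /* e.g. with 64k clusters, a discard of [10k, 200k) is shrunk to the fully
     * covered range [64k, 192k); the partial head and tail clusters are left
     * untouched. */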
1394 
1395     if (offset > end_offset) {
1396         return 0;
1397     }
1398 
1399     nb_clusters = size_to_clusters(s, end_offset - offset);
1400 
1401     s->cache_discards = true;
1402 
1403     /* Each L2 table is handled by its own loop iteration */
1404     while (nb_clusters > 0) {
1405         ret = discard_single_l2(bs, offset, nb_clusters);
1406         if (ret < 0) {
1407             goto fail;
1408         }
1409 
1410         nb_clusters -= ret;
1411         offset += (ret * s->cluster_size);
1412     }
1413 
1414     ret = 0;
1415 fail:
1416     s->cache_discards = false;
1417     qcow2_process_discards(bs, ret);
1418 
1419     return ret;
1420 }
1421 
1422 /*
1423  * This zeroes as many clusters of nb_clusters as possible at once (i.e.
1424  * all clusters in the same L2 table) and returns the number of zeroed
1425  * clusters.
1426  */
1427 static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
1428     unsigned int nb_clusters)
1429 {
1430     BDRVQcowState *s = bs->opaque;
1431     uint64_t *l2_table;
1432     int l2_index;
1433     int ret;
1434     int i;
1435 
1436     ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1437     if (ret < 0) {
1438         return ret;
1439     }
1440 
1441     /* Limit nb_clusters to one L2 table */
1442     nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1443 
1444     for (i = 0; i < nb_clusters; i++) {
1445         uint64_t old_offset;
1446 
1447         old_offset = be64_to_cpu(l2_table[l2_index + i]);
1448 
1449         /* Update L2 entries */
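        /* A compressed cluster cannot simply get the zero flag ORed in: its
         * L2 entry encodes offset and size of the compressed data rather than
         * a standard cluster descriptor. Replace it with a standalone zero
         * entry and free the compressed data instead. */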
1450         qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
1451         if (old_offset & QCOW_OFLAG_COMPRESSED) {
1452             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1453             qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
1454         } else {
1455             l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
1456         }
1457     }
1458 
1459     ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
1460     if (ret < 0) {
1461         return ret;
1462     }
1463 
1464     return nb_clusters;
1465 }
1466 
1467 int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
1468 {
1469     BDRVQcowState *s = bs->opaque;
1470     unsigned int nb_clusters;
1471     int ret;
1472 
1473     /* The zero flag is only supported by version 3 and newer */
1474     if (s->qcow_version < 3) {
1475         return -ENOTSUP;
1476     }
1477 
1478     /* Each L2 table is handled by its own loop iteration */
1479     nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);
1480 
1481     s->cache_discards = true;
1482 
1483     while (nb_clusters > 0) {
1484         ret = zero_single_l2(bs, offset, nb_clusters);
1485         if (ret < 0) {
1486             goto fail;
1487         }
1488 
1489         nb_clusters -= ret;
1490         offset += (ret * s->cluster_size);
1491     }
1492 
1493     ret = 0;
1494 fail:
1495     s->cache_discards = false;
1496     qcow2_process_discards(bs, ret);
1497 
1498     return ret;
1499 }
1500