/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }
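
    /*
     * Growth illustration (informative): starting from one entry, the 3/2
     * rule above produces the sizes 1, 2, 3, 5, 8, 12, 18, ..., so any
     * target size is reached in O(log n) grow operations.
     */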

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table,
                           new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
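    /*
     * Layout note (informative): in QCowHeader the 32-bit l1_size field is
     * immediately followed by the 64-bit l1_table_offset field, so the
     * 12-byte buffer below updates both fields with a single write.
     */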
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;

fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, with *l2_table pointing to the loaded table, or a
 * negative errno value if reading from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (we can't update single
 * entries, and we really don't want bdrv_pwrite to perform a
 * read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
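/* With 8-byte entries, one 512-byte sector holds exactly 64 L1 entries. */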
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocates a new L2 table in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), copies the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        /* The new table is not referenced by the cache yet, so free it
         * directly; the fail path below would release a cache entry that
         * was never acquired. */
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        return ret;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
        (void**) table);
    if (ret < 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        return ret;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows, for example, stopping at the first compressed
 * cluster, which may require different handling.)
 */
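/*
 * Worked example (informative): with cluster_size 0x10000 and stop_flags
 * QCOW_OFLAG_COMPRESSED, entries mapping to host offsets 0x50000, 0x60000,
 * 0x70000 count as three contiguous clusters; a gap in the host offsets or
 * a compressed entry ends the run.
 */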
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset) {
        return 0;
    }

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters,
    uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
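/*
 * Informative: each 512-byte sector is encrypted as an independent AES-CBC
 * chain whose IV is the little-endian virtual sector number, zero-padded to
 * 16 bytes; this sector-number IV is what makes the scheme
 * cryptoloop-compatible.
 */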
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface.  This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        iov.iov_base, iov.iov_base, n, 1,
                        &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0,
                QCOW_OFLAG_COMPRESSED | QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;
}
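
/*
 * Illustrative call pattern (informative only; variable names are
 * hypothetical):
 *
 *     int nb = 16;
 *     uint64_t host_offset;
 *     int type = qcow2_get_cluster_offset(bs, guest_offset, &nb,
 *                                         &host_offset);
 *
 * On return, type is a QCOW2_CLUSTER_* value (or -errno) and nb is the
 * number of sectors starting at guest_offset that share that cluster type.
 */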

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the L2 table.
 *
 * The L2 table offset in the qcow2 file and the cluster index
 * in the L2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure cases
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);
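
    /*
     * Informative note: nb_csectors is one less than the number of 512-byte
     * sectors the compressed data spans; qcow2_decompress_cluster() adds the
     * 1 back when it reads the data.
     */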

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;
    bool cow = false;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);

    if (m->nb_clusters == 0) {
        return 0;
    }

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto err;
        }
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto err;
        }
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes not yet allocated
 * space, which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
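/*
 * Overlap example (informative): if a request covers clusters [4, 10) and a
 * running allocation covers [7, 9), the request is truncated to [4, 7) and
 * proceeds; a request that starts inside [7, 9) gets *nb_clusters == 0,
 * waits on the dependency, and returns -EAGAIN so the caller restarts.
 */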
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset >> s->cluster_bits;
        uint64_t end = start + *nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                *nb_clusters = old_start - start;
            } else {
                *nb_clusters = 0;
            }

            if (*nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    if (!*nb_clusters) {
        abort();
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret, sectors;
    uint64_t *l2_table;
    unsigned int nb_clusters, keep_clusters;
    uint64_t cluster_offset;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    /* Find L2 entry for the first involved cluster */
again:
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
                      s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /*
     * Check how many clusters are already allocated and don't need COW, and
     * how many need a new allocation.
     */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], 0,
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);
        nb_clusters -= keep_clusters;
    } else {
        keep_clusters = 0;
        cluster_offset = 0;
    }

    if (nb_clusters > 0) {
        /* For the moment, overwrite compressed clusters one by one */
        uint64_t entry = be64_to_cpu(l2_table[l2_index + keep_clusters]);
        if (entry & QCOW_OFLAG_COMPRESSED) {
            nb_clusters = 1;
        } else {
            nb_clusters = count_cow_clusters(s, nb_clusters, l2_table,
                                             l2_index + keep_clusters);
        }
    }

    cluster_offset &= L2E_OFFSET_MASK;

    /*
     * The L2 table isn't used any more after this. As long as the cache works
     * synchronously, it's important to release it before calling
     * do_alloc_cluster_offset, which may yield if we need to wait for another
     * request to complete. If we still had the reference, we could use up the
     * whole cache with sleeping requests.
     */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* If there is something left to allocate, do that now */
    *m = (QCowL2Meta) {
        .cluster_offset     = cluster_offset,
        .nb_clusters        = 0,
    };
    qemu_co_queue_init(&m->dependent_requests);

    if (nb_clusters > 0) {
        uint64_t alloc_offset;
        uint64_t alloc_cluster_offset;
        uint64_t keep_bytes = keep_clusters * s->cluster_size;

        /* Calculate start and size of allocation */
        alloc_offset = offset + keep_bytes;

        if (keep_clusters == 0) {
            alloc_cluster_offset = 0;
        } else {
            alloc_cluster_offset = cluster_offset + keep_bytes;
        }

        /* Allocate, if necessary at a given offset in the image file */
        ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret == -EAGAIN) {
            goto again;
        } else if (ret < 0) {
            goto fail;
        }

        /* save info needed for meta data update */
        if (nb_clusters > 0) {
            int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
            int avail_sectors = (keep_clusters + nb_clusters)
                                << (s->cluster_bits - BDRV_SECTOR_BITS);

            *m = (QCowL2Meta) {
                .cluster_offset = keep_clusters == 0 ?
                                  alloc_cluster_offset : cluster_offset,
                .alloc_offset   = alloc_cluster_offset,
                .offset         = alloc_offset,
                .n_start        = keep_clusters == 0 ? n_start : 0,
                .nb_clusters    = nb_clusters,
                .nb_available   = MIN(requested_sectors, avail_sectors),
            };
            qemu_co_queue_init(&m->dependent_requests);
            QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
        }
    }

    /* Some cleanup work */
    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
    if (sectors > n_end) {
        sectors = n_end;
    }

    assert(sectors > n_start);
    *num = sectors - n_start;

    return 0;

fail:
    if (m->nb_clusters > 0) {
        QLIST_REMOVE(m, next_in_flight);
    }
    return ret;
}
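
/*
 * Typical caller sequence (informative sketch, mirroring qcow2_co_writev()):
 * after a successful call, the caller writes its guest data into the newly
 * allocated clusters starting at m->alloc_offset and then calls
 * qcow2_alloc_cluster_link_l2(bs, m) to make the new mapping visible in the
 * L2 table.
 */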

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

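    /*
     * qcow2 compresses clusters as raw deflate streams: the negative
     * windowBits argument tells zlib to expect raw deflate data (no zlib
     * header or trailer) with a 2^12 = 4 KB window.
     */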
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
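        /*
         * The compressed data need not start on a sector boundary: read all
         * sectors that cover it and decompress starting sector_offset bytes
         * into the buffer.
         */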
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + ((uint64_t) nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
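        /*
         * Informative: a compressed cluster can't simply gain the zero flag,
         * so it is freed and replaced by a standalone zero entry; a normal
         * cluster keeps its host offset and only gains the flag.
         */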
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s,
                                   (uint64_t) nb_sectors << BDRV_SECTOR_BITS);

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}
1189