/*
 * Block driver for Parallels disk image format
 *
 * Copyright (c) 2007 Alex Beregszaszi
 * Copyright (c) 2015 Denis V. Lunev <den@openvz.org>
 *
 * This code was originally based on comparing different disk images created
 * by Parallels. Currently it is based on the open OpenVZ sources
 * available at
 *     http://git.openvz.org/?p=ploop;a=summary
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qemu/module.h"
#include "qemu/bswap.h"
#include "qemu/bitmap.h"
#include "migration/blocker.h"
#include "parallels.h"

/**************************************************************/

#define HEADER_MAGIC "WithoutFreeSpace"
#define HEADER_MAGIC2 "WithouFreSpacExt"
#define HEADER_VERSION 2
#define HEADER_INUSE_MAGIC  (0x746F6E59)
#define MAX_PARALLELS_IMAGE_FACTOR (1ull << 32)

static QEnumLookup prealloc_mode_lookup = {
    .array = (const char *const[]) {
        "falloc",
        "truncate",
    },
    .size = PRL_PREALLOC_MODE__MAX
};

#define PARALLELS_OPT_PREALLOC_MODE     "prealloc-mode"
#define PARALLELS_OPT_PREALLOC_SIZE     "prealloc-size"

static QemuOptsList parallels_runtime_opts = {
    .name = "parallels",
    .head = QTAILQ_HEAD_INITIALIZER(parallels_runtime_opts.head),
    .desc = {
        {
            .name = PARALLELS_OPT_PREALLOC_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Preallocation size on image expansion",
            .def_value_str = "128M",
        },
        {
            .name = PARALLELS_OPT_PREALLOC_MODE,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode on image expansion "
                    "(allowed values: falloc, truncate)",
            .def_value_str = "falloc",
        },
        { /* end of list */ },
    },
};

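/*
 * Convert a BAT (block allocation table) entry into the sector offset of the
 * corresponding cluster in the image file.  Entries are stored little-endian
 * and scaled by off_multiplier (1 for the legacy "WithoutFreeSpace" format,
 * the cluster size in sectors for "WithouFreSpacExt").
 */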
static int64_t bat2sect(BDRVParallelsState *s, uint32_t idx)
{
    return (uint64_t)le32_to_cpu(s->bat_bitmap[idx]) * s->off_multiplier;
}

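/* Byte offset of BAT entry @idx within the on-disk header area */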
static uint32_t bat_entry_off(uint32_t idx)
{
    return sizeof(ParallelsHeader) + sizeof(uint32_t) * idx;
}

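/*
 * Map a guest sector number to a sector offset in the image file, or return
 * -1 if the containing cluster has not been allocated yet.
 */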
static int64_t seek_to_sector(BDRVParallelsState *s, int64_t sector_num)
{
    uint32_t index, offset;

    index = sector_num / s->tracks;
    offset = sector_num % s->tracks;

    /* not allocated */
    if ((index >= s->bat_size) || (s->bat_bitmap[index] == 0)) {
        return -1;
    }
    return bat2sect(s, index) + offset;
}

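/* Number of sectors from @sector_num to the end of its cluster, capped at
 * @nb_sectors */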
static int cluster_remainder(BDRVParallelsState *s, int64_t sector_num,
        int nb_sectors)
{
    int ret = s->tracks - sector_num % s->tracks;
    return MIN(nb_sectors, ret);
}

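/*
 * Return the file offset (in sectors) at which @sector_num is stored, or -1
 * if it is not allocated.  *pnum is set to the number of sectors, starting at
 * @sector_num, that share the same allocation status and are contiguous in
 * the image file.
 */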
static int64_t block_status(BDRVParallelsState *s, int64_t sector_num,
                            int nb_sectors, int *pnum)
{
    int64_t start_off = -2, prev_end_off = -2;

    *pnum = 0;
    while (nb_sectors > 0 || start_off == -2) {
        int64_t offset = seek_to_sector(s, sector_num);
        int to_end;

        if (start_off == -2) {
            start_off = offset;
            prev_end_off = offset;
        } else if (offset != prev_end_off) {
            break;
        }

        to_end = cluster_remainder(s, sector_num, nb_sectors);
        nb_sectors -= to_end;
        sector_num += to_end;
        *pnum += to_end;

        if (offset > 0) {
            prev_end_off += to_end;
        }
    }
    return start_off;
}

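/*
 * Return the file offset (in sectors) at which @sector_num can be written,
 * allocating new clusters if necessary.  Newly allocated space is
 * preallocated according to prealloc_mode and, if a backing file is present,
 * filled with data read from it.  *pnum is set as by block_status().
 * Returns a negative errno on failure.
 */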
static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors, int *pnum)
{
    int ret;
    BDRVParallelsState *s = bs->opaque;
    int64_t pos, space, idx, to_allocate, i, len;

    pos = block_status(s, sector_num, nb_sectors, pnum);
    if (pos > 0) {
        return pos;
    }

    idx = sector_num / s->tracks;
    to_allocate = DIV_ROUND_UP(sector_num + *pnum, s->tracks) - idx;

    /* This function is called only by parallels_co_writev(), which will never
     * pass a sector_num at or beyond the end of the image (because the block
     * layer never passes such a sector_num to that function). Therefore, idx
     * is always below s->bat_size.
     * block_status() will limit *pnum so that sector_num + *pnum will not
     * exceed the image end. Therefore, idx + to_allocate cannot exceed
     * s->bat_size.
     * Note that s->bat_size is an unsigned int, therefore idx + to_allocate
     * will always fit into a uint32_t. */
    assert(idx < s->bat_size && idx + to_allocate <= s->bat_size);

    space = to_allocate * s->tracks;
    len = bdrv_getlength(bs->file->bs);
    if (len < 0) {
        return len;
    }
    if (s->data_end + space > (len >> BDRV_SECTOR_BITS)) {
        space += s->prealloc_size;
        if (s->prealloc_mode == PRL_PREALLOC_MODE_FALLOCATE) {
            ret = bdrv_pwrite_zeroes(bs->file,
                                     s->data_end << BDRV_SECTOR_BITS,
                                     space << BDRV_SECTOR_BITS, 0);
        } else {
            ret = bdrv_truncate(bs->file,
                                (s->data_end + space) << BDRV_SECTOR_BITS,
                                PREALLOC_MODE_OFF, NULL);
        }
        if (ret < 0) {
            return ret;
        }
    }

    /* Try to read from the backing file to fill newly allocated clusters.
     * FIXME: 1. the preceding write_zeroes may be redundant
     *        2. most of the data read from the backing file will be
     *           overwritten by parallels_co_writev(); for a cluster-aligned
     *           write this read is not needed at all
     *        3. it would be better to combine the data from the backing file
     *           and the new data into a single write call */
    if (bs->backing) {
        int64_t nb_cow_sectors = to_allocate * s->tracks;
        int64_t nb_cow_bytes = nb_cow_sectors << BDRV_SECTOR_BITS;
        QEMUIOVector qiov;
        struct iovec iov = {
            .iov_len = nb_cow_bytes,
            .iov_base = qemu_blockalign(bs, nb_cow_bytes)
        };
        qemu_iovec_init_external(&qiov, &iov, 1);

        ret = bdrv_co_readv(bs->backing, idx * s->tracks, nb_cow_sectors,
                            &qiov);
        if (ret < 0) {
            qemu_vfree(iov.iov_base);
            return ret;
        }

        ret = bdrv_co_writev(bs->file, s->data_end, nb_cow_sectors, &qiov);
        qemu_vfree(iov.iov_base);
        if (ret < 0) {
            return ret;
        }
    }

    for (i = 0; i < to_allocate; i++) {
        s->bat_bitmap[idx + i] = cpu_to_le32(s->data_end / s->off_multiplier);
        s->data_end += s->tracks;
        bitmap_set(s->bat_dirty_bmap,
                   bat_entry_off(idx + i) / s->bat_dirty_block, 1);
    }

    return bat2sect(s, idx) + sector_num % s->tracks;
}

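/*
 * Write the dirty parts of the in-memory header (including the BAT) back to
 * the image file.  Dirtiness is tracked per bat_dirty_block-sized chunk in
 * bat_dirty_bmap.
 */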
static coroutine_fn int parallels_co_flush_to_os(BlockDriverState *bs)
{
    BDRVParallelsState *s = bs->opaque;
    unsigned long size = DIV_ROUND_UP(s->header_size, s->bat_dirty_block);
    unsigned long bit;

    qemu_co_mutex_lock(&s->lock);

    bit = find_first_bit(s->bat_dirty_bmap, size);
    while (bit < size) {
        uint32_t off = bit * s->bat_dirty_block;
        uint32_t to_write = s->bat_dirty_block;
        int ret;

        if (off + to_write > s->header_size) {
            to_write = s->header_size - off;
        }
        ret = bdrv_pwrite(bs->file, off, (uint8_t *)s->header + off,
                          to_write);
        if (ret < 0) {
            qemu_co_mutex_unlock(&s->lock);
            return ret;
        }
        bit = find_next_bit(s->bat_dirty_bmap, size, bit + 1);
    }
    bitmap_zero(s->bat_dirty_bmap, size);

    qemu_co_mutex_unlock(&s->lock);
    return 0;
}

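/*
 * bdrv_co_get_block_status() implementation: report allocated ranges as DATA
 * with a valid offset into bs->file, and return 0 for unallocated ranges.
 */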
static int64_t coroutine_fn parallels_co_get_block_status(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file)
{
    BDRVParallelsState *s = bs->opaque;
    int64_t offset;

    qemu_co_mutex_lock(&s->lock);
    offset = block_status(s, sector_num, nb_sectors, pnum);
    qemu_co_mutex_unlock(&s->lock);

    if (offset < 0) {
        return 0;
    }

    *file = bs->file->bs;
    return (offset << BDRV_SECTOR_BITS) |
        BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}

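/*
 * Write @nb_sectors starting at @sector_num, allocating clusters on demand.
 * The BAT itself is only marked dirty here; it reaches the file on the next
 * flush via parallels_co_flush_to_os().
 */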
static coroutine_fn int parallels_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    BDRVParallelsState *s = bs->opaque;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    int ret = 0;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    while (nb_sectors > 0) {
        int64_t position;
        int n, nbytes;

        qemu_co_mutex_lock(&s->lock);
        position = allocate_clusters(bs, sector_num, nb_sectors, &n);
        qemu_co_mutex_unlock(&s->lock);
        if (position < 0) {
            ret = (int)position;
            break;
        }

        nbytes = n << BDRV_SECTOR_BITS;

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done, nbytes);

        ret = bdrv_co_writev(bs->file, position, n, &hd_qiov);
        if (ret < 0) {
            break;
        }

        nb_sectors -= n;
        sector_num += n;
        bytes_done += nbytes;
    }

    qemu_iovec_destroy(&hd_qiov);
    return ret;
}

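/*
 * Read @nb_sectors starting at @sector_num.  Unallocated ranges are read from
 * the backing file if there is one, otherwise they are returned as zeroes.
 */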
static coroutine_fn int parallels_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    BDRVParallelsState *s = bs->opaque;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    int ret = 0;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    while (nb_sectors > 0) {
        int64_t position;
        int n, nbytes;

        qemu_co_mutex_lock(&s->lock);
        position = block_status(s, sector_num, nb_sectors, &n);
        qemu_co_mutex_unlock(&s->lock);

        nbytes = n << BDRV_SECTOR_BITS;

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done, nbytes);

        if (position < 0) {
            if (bs->backing) {
                ret = bdrv_co_readv(bs->backing, sector_num, n, &hd_qiov);
                if (ret < 0) {
                    break;
                }
            } else {
                qemu_iovec_memset(&hd_qiov, 0, 0, nbytes);
            }
        } else {
            ret = bdrv_co_readv(bs->file, position, n, &hd_qiov);
            if (ret < 0) {
                break;
            }
        }

        nb_sectors -= n;
        sector_num += n;
        bytes_done += nbytes;
    }

    qemu_iovec_destroy(&hd_qiov);
    return ret;
}

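/*
 * Image consistency check: detect an unclean shutdown flag, BAT entries that
 * point outside the file, and space leaked past the last used cluster.  With
 * BDRV_FIX_ERRORS/BDRV_FIX_LEAKS the problems found are repaired in place.
 */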
static int parallels_check(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix)
{
    BDRVParallelsState *s = bs->opaque;
    int64_t size, prev_off, high_off;
    int ret;
    uint32_t i;
    bool flush_bat = false;
    int cluster_size = s->tracks << BDRV_SECTOR_BITS;

    size = bdrv_getlength(bs->file->bs);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    if (s->header_unclean) {
        fprintf(stderr, "%s image was not closed correctly\n",
                fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR");
        res->corruptions++;
        if (fix & BDRV_FIX_ERRORS) {
            /* parallels_close will do the job right */
            res->corruptions_fixed++;
            s->header_unclean = false;
        }
    }

    res->bfi.total_clusters = s->bat_size;
    res->bfi.compressed_clusters = 0; /* compression is not supported */

    high_off = 0;
    prev_off = 0;
    for (i = 0; i < s->bat_size; i++) {
        int64_t off = bat2sect(s, i) << BDRV_SECTOR_BITS;
        if (off == 0) {
            prev_off = 0;
            continue;
        }

        /* cluster outside the image */
        if (off > size) {
            fprintf(stderr, "%s cluster %u is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);
            res->corruptions++;
            if (fix & BDRV_FIX_ERRORS) {
                prev_off = 0;
                s->bat_bitmap[i] = 0;
                res->corruptions_fixed++;
                flush_bat = true;
                continue;
            }
        }

        res->bfi.allocated_clusters++;
        if (off > high_off) {
            high_off = off;
        }

        if (prev_off != 0 && (prev_off + cluster_size) != off) {
            res->bfi.fragmented_clusters++;
        }
        prev_off = off;
    }

    if (flush_bat) {
        ret = bdrv_pwrite_sync(bs->file, 0, s->header, s->header_size);
        if (ret < 0) {
            res->check_errors++;
            return ret;
        }
    }

    res->image_end_offset = high_off + cluster_size;
    if (size > res->image_end_offset) {
        int64_t count;
        count = DIV_ROUND_UP(size - res->image_end_offset, cluster_size);
        fprintf(stderr, "%s space leaked at the end of the image %" PRId64 "\n",
                fix & BDRV_FIX_LEAKS ? "Repairing" : "ERROR",
                size - res->image_end_offset);
        res->leaks += count;
        if (fix & BDRV_FIX_LEAKS) {
            Error *local_err = NULL;
            ret = bdrv_truncate(bs->file, res->image_end_offset,
                                PREALLOC_MODE_OFF, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                res->check_errors++;
                return ret;
            }
            res->leaks_fixed += count;
        }
    }

    return 0;
}

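/*
 * Create a new image: write the header sector, then zero out the rest of the
 * BAT area so that all clusters start out unallocated.  Typically reached via
 * "qemu-img create -f parallels", e.g. (illustrative invocation):
 *     qemu-img create -f parallels -o cluster_size=1M disk.hds 16G
 */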
static int parallels_create(const char *filename, QemuOpts *opts, Error **errp)
{
    int64_t total_size, cl_size;
    uint8_t tmp[BDRV_SECTOR_SIZE];
    Error *local_err = NULL;
    BlockBackend *file;
    uint32_t bat_entries, bat_sectors;
    ParallelsHeader header;
    int ret;

    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    cl_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
                          DEFAULT_CLUSTER_SIZE), BDRV_SECTOR_SIZE);
    if (total_size >= MAX_PARALLELS_IMAGE_FACTOR * cl_size) {
        error_setg(errp, "Image size is too large for this cluster size");
        return -E2BIG;
    }

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    file = blk_new_open(filename, NULL, NULL,
                        BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                        &local_err);
    if (file == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(file, true);

    ret = blk_truncate(file, 0, PREALLOC_MODE_OFF, errp);
    if (ret < 0) {
        goto exit;
    }

    bat_entries = DIV_ROUND_UP(total_size, cl_size);
    bat_sectors = DIV_ROUND_UP(bat_entry_off(bat_entries), cl_size);
    bat_sectors = (bat_sectors * cl_size) >> BDRV_SECTOR_BITS;

    memset(&header, 0, sizeof(header));
    memcpy(header.magic, HEADER_MAGIC2, sizeof(header.magic));
    header.version = cpu_to_le32(HEADER_VERSION);
    /* geometry is not used at the image level, so nominal values are fine */
    header.heads = cpu_to_le32(HEADS_NUMBER);
    header.cylinders = cpu_to_le32(total_size / BDRV_SECTOR_SIZE
                                   / HEADS_NUMBER / SEC_IN_CYL);
    header.tracks = cpu_to_le32(cl_size >> BDRV_SECTOR_BITS);
    header.bat_entries = cpu_to_le32(bat_entries);
    header.nb_sectors = cpu_to_le64(DIV_ROUND_UP(total_size, BDRV_SECTOR_SIZE));
    header.data_off = cpu_to_le32(bat_sectors);

    /* write all the data */
    memset(tmp, 0, sizeof(tmp));
    memcpy(tmp, &header, sizeof(header));

    ret = blk_pwrite(file, 0, tmp, BDRV_SECTOR_SIZE, 0);
    if (ret < 0) {
        goto exit;
    }
    ret = blk_pwrite_zeroes(file, BDRV_SECTOR_SIZE,
                            (bat_sectors - 1) << BDRV_SECTOR_BITS, 0);
    if (ret < 0) {
        goto exit;
    }
    ret = 0;

done:
    blk_unref(file);
    return ret;

exit:
    error_setg_errno(errp, -ret, "Failed to create Parallels image");
    goto done;
}

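/*
 * Format probe: score 100 if the buffer starts with a known Parallels magic
 * string and the matching header version, 0 otherwise.
 */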
static int parallels_probe(const uint8_t *buf, int buf_size,
                           const char *filename)
{
    const ParallelsHeader *ph = (const void *)buf;

    if (buf_size < sizeof(ParallelsHeader)) {
        return 0;
    }

    if ((!memcmp(ph->magic, HEADER_MAGIC, 16) ||
           !memcmp(ph->magic, HEADER_MAGIC2, 16)) &&
           (le32_to_cpu(ph->version) == HEADER_VERSION)) {
        return 100;
    }

    return 0;
}

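/*
 * Synchronously write the first part of the in-memory header (at least the
 * ParallelsHeader itself, at most one aligned block) back to the image file.
 */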
static int parallels_update_header(BlockDriverState *bs)
{
    BDRVParallelsState *s = bs->opaque;
    unsigned size = MAX(bdrv_opt_mem_align(bs->file->bs),
                        sizeof(ParallelsHeader));

    if (size > s->header_size) {
        size = s->header_size;
    }
    return bdrv_pwrite_sync(bs->file, 0, s->header, size);
}

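/*
 * Open an image: validate the header, map the BAT into memory, parse the
 * runtime preallocation options, and mark the image as "in use" when it is
 * opened read/write so that an unclean shutdown can be detected later.
 */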
static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    BDRVParallelsState *s = bs->opaque;
    ParallelsHeader ph;
    int ret, size, i;
    QemuOpts *opts = NULL;
    Error *local_err = NULL;
    char *buf;

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
    if (ret < 0) {
        goto fail;
    }

    bs->total_sectors = le64_to_cpu(ph.nb_sectors);

    if (le32_to_cpu(ph.version) != HEADER_VERSION) {
        goto fail_format;
    }
    if (!memcmp(ph.magic, HEADER_MAGIC, 16)) {
        s->off_multiplier = 1;
        bs->total_sectors = 0xffffffff & bs->total_sectors;
    } else if (!memcmp(ph.magic, HEADER_MAGIC2, 16)) {
        s->off_multiplier = le32_to_cpu(ph.tracks);
    } else {
        goto fail_format;
    }

    s->tracks = le32_to_cpu(ph.tracks);
    if (s->tracks == 0) {
        error_setg(errp, "Invalid image: Zero sectors per track");
        ret = -EINVAL;
        goto fail;
    }
    if (s->tracks > INT32_MAX / 513) {
        error_setg(errp, "Invalid image: Too big cluster");
        ret = -EFBIG;
        goto fail;
    }

    s->bat_size = le32_to_cpu(ph.bat_entries);
    if (s->bat_size > INT_MAX / sizeof(uint32_t)) {
        error_setg(errp, "Catalog too large");
        ret = -EFBIG;
        goto fail;
    }

    size = bat_entry_off(s->bat_size);
    s->header_size = ROUND_UP(size, bdrv_opt_mem_align(bs->file->bs));
    s->header = qemu_try_blockalign(bs->file->bs, s->header_size);
    if (s->header == NULL) {
        ret = -ENOMEM;
        goto fail;
    }
    s->data_end = le32_to_cpu(ph.data_off);
    if (s->data_end == 0) {
        s->data_end = ROUND_UP(bat_entry_off(s->bat_size), BDRV_SECTOR_SIZE);
    }
    if (s->data_end < s->header_size) {
        /* There is not enough padding between the BAT and the data area to
         * round the header buffer up to the block alignment, so fall back to
         * the exact size; read-modify-write of the header cannot be avoided. */
        s->header_size = size;
    }

    ret = bdrv_pread(bs->file, 0, s->header, s->header_size);
    if (ret < 0) {
        goto fail;
    }
    s->bat_bitmap = (uint32_t *)(s->header + 1);

    for (i = 0; i < s->bat_size; i++) {
        int64_t off = bat2sect(s, i);
        if (off >= s->data_end) {
            s->data_end = off + s->tracks;
        }
    }

    if (le32_to_cpu(ph.inuse) == HEADER_INUSE_MAGIC) {
        /* Image was not closed correctly. The check is mandatory */
        s->header_unclean = true;
        if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
            error_setg(errp, "parallels: Image was not closed correctly; "
                       "cannot be opened read/write");
            ret = -EACCES;
            goto fail;
        }
    }

    opts = qemu_opts_create(&parallels_runtime_opts, NULL, 0, &local_err);
    if (local_err != NULL) {
        goto fail_options;
    }

    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err != NULL) {
        goto fail_options;
    }

    s->prealloc_size =
        qemu_opt_get_size_del(opts, PARALLELS_OPT_PREALLOC_SIZE, 0);
    s->prealloc_size = MAX(s->tracks, s->prealloc_size >> BDRV_SECTOR_BITS);
    buf = qemu_opt_get_del(opts, PARALLELS_OPT_PREALLOC_MODE);
    s->prealloc_mode = qapi_enum_parse(&prealloc_mode_lookup, buf,
                                       PRL_PREALLOC_MODE_FALLOCATE,
                                       &local_err);
    g_free(buf);
    if (local_err != NULL) {
        goto fail_options;
    }

    if (!bdrv_has_zero_init(bs->file->bs)) {
        s->prealloc_mode = PRL_PREALLOC_MODE_FALLOCATE;
    }

    if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_INACTIVE)) {
        s->header->inuse = cpu_to_le32(HEADER_INUSE_MAGIC);
        ret = parallels_update_header(bs);
        if (ret < 0) {
            goto fail;
        }
    }

    s->bat_dirty_block = 4 * getpagesize();
    s->bat_dirty_bmap =
        bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));

    /* Disable migration until bdrv_invalidate_cache method is added */
    error_setg(&s->migration_blocker, "The Parallels format used by node '%s' "
               "does not support live migration",
               bdrv_get_device_or_node_name(bs));
    ret = migrate_add_blocker(s->migration_blocker, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_free(s->migration_blocker);
        goto fail;
    }
    qemu_co_mutex_init(&s->lock);
    return 0;

fail_format:
    error_setg(errp, "Image not in Parallels format");
    ret = -EINVAL;
fail:
    qemu_vfree(s->header);
    return ret;

fail_options:
    error_propagate(errp, local_err);
    ret = -EINVAL;
    goto fail;
}

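/*
 * Close an image: on a clean read/write shutdown clear the "in use" flag,
 * write the header back and trim any preallocated space beyond data_end.
 */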
static void parallels_close(BlockDriverState *bs)
{
    BDRVParallelsState *s = bs->opaque;

    if ((bs->open_flags & BDRV_O_RDWR) && !(bs->open_flags & BDRV_O_INACTIVE)) {
        s->header->inuse = 0;
        parallels_update_header(bs);
        bdrv_truncate(bs->file, s->data_end << BDRV_SECTOR_BITS,
                      PREALLOC_MODE_OFF, NULL);
    }

    g_free(s->bat_dirty_bmap);
    qemu_vfree(s->header);

    migrate_del_blocker(s->migration_blocker);
    error_free(s->migration_blocker);
}

static QemuOptsList parallels_create_opts = {
    .name = "parallels-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(parallels_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size",
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Parallels image cluster size",
            .def_value_str = stringify(DEFAULT_CLUSTER_SIZE),
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_parallels = {
    .format_name              = "parallels",
    .instance_size            = sizeof(BDRVParallelsState),
    .bdrv_probe               = parallels_probe,
    .bdrv_open                = parallels_open,
    .bdrv_close               = parallels_close,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_co_get_block_status = parallels_co_get_block_status,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_flush_to_os      = parallels_co_flush_to_os,
    .bdrv_co_readv            = parallels_co_readv,
    .bdrv_co_writev           = parallels_co_writev,
    .supports_backing         = true,
    .bdrv_create              = parallels_create,
    .bdrv_check               = parallels_check,
    .create_opts              = &parallels_create_opts,
};

static void bdrv_parallels_init(void)
{
    bdrv_register(&bdrv_parallels);
}

block_init(bdrv_parallels_init);