xref: /openbmc/qemu/block/dmg.c (revision f363d039)
/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "dmg.h"

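/* Decompression hooks for the optional compression formats.  They are filled
 * in by the "dmg-bz2" and "dmg-lzfse" block modules (loaded on demand in
 * dmg_open) and stay NULL when the corresponding module is not available, in
 * which case chunks of that type are skipped by dmg_is_known_block_type(). */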
int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                          char *next_out, unsigned int avail_out);

int (*dmg_uncompress_lzfse)(char *next_in, unsigned int avail_in,
                            char *next_out, unsigned int avail_out);

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory from being
     * used and to avoid truncation when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

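/* UDIF block ("blkx") chunk types as they appear on disk.  This driver reads
 * the zero, raw and zlib types directly; bzip2 and lzfse chunks are readable
 * only when the matching decompression module is present (see
 * dmg_is_known_block_type).  Other entries, such as comment and terminator
 * records, are skipped while building the chunk table. */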
enum {
    /* DMG Block Type */
    UDZE = 0, /* Zeroes */
    UDRW,     /* RAW type (uncompressed) */
    UDIG,     /* Ignore */
    UDCO = 0x80000004, /* ADC compressed (not supported by this driver) */
    UDZO,     /* zlib compressed */
    UDBZ,     /* bzip2 compressed */
    ULFO,     /* lzfse compressed */
    UDCM = 0x7ffffffe, /* Comments */
    UDLE = 0xffffffff  /* Last Entry */
};

static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

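/* All multi-byte fields in a DMG image are stored big-endian; these helpers
 * read an integer from the underlying file (or from an in-memory buffer) and
 * convert it to host byte order. */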
static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary.  This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDZE: /* zero */
    case UDIG: /* ignore */
        /* Because an all-zeroes block may be very large, it is treated
         * specially: its sectors are not copied from a buffer, a simple memset
         * is used instead. Therefore uncompressed_sectors does not need to be
         * set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the beginning of the UDIF trailer (512 bytes). This magic can be
     * found in the last 511 bytes of the second-to-last sector or the first 4
     * bytes of the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

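/* A DMG image describes its guest-visible data as a table of chunks.  The
 * table is stored either in a legacy resource fork (dmg_read_resource_fork)
 * or, in newer images, as base64-encoded "mish" blocks inside an XML property
 * list (dmg_read_plist_xml); dmg_open picks whichever the UDIF trailer
 * advertises. */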
/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDZE:    /* zeros */
    case UDRW:    /* uncompressed */
    case UDIG:    /* ignore */
    case UDZO:    /* zlib */
        return true;
    case UDBZ:    /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO:    /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}

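/* Layout of a "mish" (BLKX) block as consumed below.  The offsets are the
 * fields this parser actually reads; a real block carries further fields that
 * are ignored here:
 *
 *   +0x00  magic 'mish'
 *   +0x08  first sector of this block in the output (guest) image
 *   +0x18  offset of the block's data within the data fork, in bytes
 *   +0xcc  chunk entries, 40 bytes each:
 *            +0x00  entry type (UDZE, UDRW, UDZO, ...)
 *            +0x08  start sector, relative to the block's first sector
 *            +0x10  sector count
 *            +0x18  offset in the (compressed) data fork
 *            +0x20  length in the (compressed) data fork
 */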
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to the beginning of the chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2); /* 32-bit entries */
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* an all-zeroes sector (types UDZE and UDIG) does not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

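/* The legacy table location is a Mac resource fork: a header whose first
 * 32-bit word is the offset of the resource data within the fork and whose
 * word at offset 8 is the length of that data, followed by one or more
 * resources, each prefixed with its own 32-bit size.  Every resource is handed
 * to dmg_read_mish_block; anything that is not a mish block is ignored there. */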
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read the offset from the beginning of the resource fork (info_begin) to
     * the resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* beginning of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

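/* Newer images ship the chunk table as an XML property list instead of a
 * resource fork.  Each <data> element in the plist contains a base64-encoded
 * mish block; rather than pulling in a full XML parser, the loop below simply
 * scans for <data>...</data> pairs, base64-decodes the payload and feeds it to
 * dmg_read_mish_block. */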
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had an XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. In the test sample the data was 284 (0x11c)
     * bytes after base64 decoding, while the data element itself was 431
     * (0x1af) bytes including tabs and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

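/* Fields of the 512-byte "koly" UDIF trailer that dmg_open reads, as byte
 * offsets from the start of the trailer (all values big-endian; names follow
 * the usual UDIF naming):
 *
 *   +0x018  DataForkOffset   start of the data fork (compressed chunks)
 *   +0x028  RsrcForkOffset   start of the legacy resource fork
 *   +0x030  RsrcForkLength   length of the resource fork (0 if absent)
 *   +0x0d8  XMLOffset        start of the XML property list
 *   +0x0e0  XMLLength        length of the XML property list (0 if absent)
 *   +0x1ec  SectorCount      size of the output image in 512-byte sectors
 */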
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    if (ret < 0) {
        return ret;
    }

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    block_module_load_one("dmg-bz2");
    block_module_load_one("dmg-lzfse");

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* allocate the chunk buffers and initialize the zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

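/* Chunk lookup helpers.  s->sectors[] holds the first guest sector of each
 * chunk and s->sectorcounts[] its length in sectors.  search_chunk relies on
 * the entries being sorted by start sector, as they are laid out in
 * well-formed images. */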
static inline int is_sector_in_chunk(BDRVDMGState* s,
                uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
            s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 <= chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            if (chunk3 == 0) {
                goto err;
            }
            chunk2 = chunk3 - 1;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3 + 1;
        }
    }
err:
    return s->n_chunks; /* error */
}

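/* Make sure the chunk that contains sector_num is decompressed into
 * s->uncompressed_chunk and remembered in s->current_chunk.  Zero/ignore
 * chunks are only looked up, not materialized; the read path memsets those
 * sectors directly.  Returns 0 on success, negative on error. */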
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * decompressed. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO: /* lzfse compressed */
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * decompressed. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                           (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_co_preadv, these are treated specially. No buffer needs
             * to be pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

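/* The dmg format driver is read-only (dmg_open insists on auto-read-only) and
 * provides no create/write callbacks; dmg_probe recognizes images purely by
 * the ".dmg" filename suffix. */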
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_format_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);