1 /* 2 * QEMU Block driver for DMG images 3 * 4 * Copyright (c) 2004 Johannes E. Schindelin 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 #include "qemu/osdep.h" 25 #include "qapi/error.h" 26 #include "block/block-io.h" 27 #include "block/block_int.h" 28 #include "qemu/bswap.h" 29 #include "qemu/error-report.h" 30 #include "qemu/module.h" 31 #include "qemu/memalign.h" 32 #include "dmg.h" 33 34 BdrvDmgUncompressFunc *dmg_uncompress_bz2; 35 BdrvDmgUncompressFunc *dmg_uncompress_lzfse; 36 37 enum { 38 /* Limit chunk sizes to prevent unreasonable amounts of memory being used 39 * or truncating when converting to 32-bit types 40 */ 41 DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */ 42 DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512, 43 }; 44 45 enum { 46 /* DMG Block Type */ 47 UDZE = 0, /* Zeroes */ 48 UDRW, /* RAW type */ 49 UDIG, /* Ignore */ 50 UDCO = 0x80000004, 51 UDZO, 52 UDBZ, 53 ULFO, 54 UDCM = 0x7ffffffe, /* Comments */ 55 UDLE = 0xffffffff /* Last Entry */ 56 }; 57 58 static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename) 59 { 60 int len; 61 62 if (!filename) { 63 return 0; 64 } 65 66 len = strlen(filename); 67 if (len > 4 && !strcmp(filename + len - 4, ".dmg")) { 68 return 2; 69 } 70 return 0; 71 } 72 73 static int GRAPH_RDLOCK 74 read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result) 75 { 76 uint64_t buffer; 77 int ret; 78 79 ret = bdrv_pread(bs->file, offset, 8, &buffer, 0); 80 if (ret < 0) { 81 return ret; 82 } 83 84 *result = be64_to_cpu(buffer); 85 return 0; 86 } 87 88 static int GRAPH_RDLOCK 89 read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result) 90 { 91 uint32_t buffer; 92 int ret; 93 94 ret = bdrv_pread(bs->file, offset, 4, &buffer, 0); 95 if (ret < 0) { 96 return ret; 97 } 98 99 *result = be32_to_cpu(buffer); 100 return 0; 101 } 102 103 static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset) 104 { 105 return be64_to_cpu(*(uint64_t *)&buffer[offset]); 106 } 107 108 static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset) 109 { 110 return be32_to_cpu(*(uint32_t *)&buffer[offset]); 111 } 112 113 /* Increase max chunk sizes, if necessary. This function is used to calculate 114 * the buffer sizes needed for compressed/uncompressed chunk I/O. 115 */ 116 static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk, 117 uint32_t *max_compressed_size, 118 uint32_t *max_sectors_per_chunk) 119 { 120 uint32_t compressed_size = 0; 121 uint32_t uncompressed_sectors = 0; 122 123 switch (s->types[chunk]) { 124 case UDZO: /* zlib compressed */ 125 case UDBZ: /* bzip2 compressed */ 126 case ULFO: /* lzfse compressed */ 127 compressed_size = s->lengths[chunk]; 128 uncompressed_sectors = s->sectorcounts[chunk]; 129 break; 130 case UDRW: /* copy */ 131 uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512); 132 break; 133 case UDZE: /* zero */ 134 case UDIG: /* ignore */ 135 /* as the all-zeroes block may be large, it is treated specially: the 136 * sector is not copied from a large buffer, a simple memset is used 137 * instead. Therefore uncompressed_sectors does not need to be set. */ 138 break; 139 } 140 141 if (compressed_size > *max_compressed_size) { 142 *max_compressed_size = compressed_size; 143 } 144 if (uncompressed_sectors > *max_sectors_per_chunk) { 145 *max_sectors_per_chunk = uncompressed_sectors; 146 } 147 } 148 149 static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp) 150 { 151 BlockDriverState *file_bs = file->bs; 152 int64_t length; 153 int64_t offset = 0; 154 uint8_t buffer[515]; 155 int i, ret; 156 157 /* bdrv_getlength returns a multiple of block size (512), rounded up. Since 158 * dmg images can have odd sizes, try to look for the "koly" magic which 159 * marks the begin of the UDIF trailer (512 bytes). This magic can be found 160 * in the last 511 bytes of the second-last sector or the first 4 bytes of 161 * the last sector (search space: 515 bytes) */ 162 length = bdrv_getlength(file_bs); 163 if (length < 0) { 164 error_setg_errno(errp, -length, 165 "Failed to get file size while reading UDIF trailer"); 166 return length; 167 } else if (length < 512) { 168 error_setg(errp, "dmg file must be at least 512 bytes long"); 169 return -EINVAL; 170 } 171 if (length > 511 + 512) { 172 offset = length - 511 - 512; 173 } 174 length = length < 515 ? length : 515; 175 ret = bdrv_pread(file, offset, length, buffer, 0); 176 if (ret < 0) { 177 error_setg_errno(errp, -ret, "Failed while reading UDIF trailer"); 178 return ret; 179 } 180 for (i = 0; i < length - 3; i++) { 181 if (buffer[i] == 'k' && buffer[i+1] == 'o' && 182 buffer[i+2] == 'l' && buffer[i+3] == 'y') { 183 return offset + i; 184 } 185 } 186 error_setg(errp, "Could not locate UDIF trailer in dmg file"); 187 return -EINVAL; 188 } 189 190 /* used when building the sector table */ 191 typedef struct DmgHeaderState { 192 /* used internally by dmg_read_mish_block to remember offsets of blocks 193 * across calls */ 194 uint64_t data_fork_offset; 195 /* exported for dmg_open */ 196 uint32_t max_compressed_size; 197 uint32_t max_sectors_per_chunk; 198 } DmgHeaderState; 199 200 static bool dmg_is_known_block_type(uint32_t entry_type) 201 { 202 switch (entry_type) { 203 case UDZE: /* zeros */ 204 case UDRW: /* uncompressed */ 205 case UDIG: /* ignore */ 206 case UDZO: /* zlib */ 207 return true; 208 case UDBZ: /* bzip2 */ 209 return !!dmg_uncompress_bz2; 210 case ULFO: /* lzfse */ 211 return !!dmg_uncompress_lzfse; 212 default: 213 return false; 214 } 215 } 216 217 static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds, 218 uint8_t *buffer, uint32_t count) 219 { 220 uint32_t type, i; 221 int ret; 222 size_t new_size; 223 uint32_t chunk_count; 224 int64_t offset = 0; 225 uint64_t data_offset; 226 uint64_t in_offset = ds->data_fork_offset; 227 uint64_t out_offset; 228 229 type = buff_read_uint32(buffer, offset); 230 /* skip data that is not a valid MISH block (invalid magic or too small) */ 231 if (type != 0x6d697368 || count < 244) { 232 /* assume success for now */ 233 return 0; 234 } 235 236 /* chunk offsets are relative to this sector number */ 237 out_offset = buff_read_uint64(buffer, offset + 8); 238 239 /* location in data fork for (compressed) blob (in bytes) */ 240 data_offset = buff_read_uint64(buffer, offset + 0x18); 241 in_offset += data_offset; 242 243 /* move to begin of chunk entries */ 244 offset += 204; 245 246 chunk_count = (count - 204) / 40; 247 new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count); 248 s->types = g_realloc(s->types, new_size / 2); 249 s->offsets = g_realloc(s->offsets, new_size); 250 s->lengths = g_realloc(s->lengths, new_size); 251 s->sectors = g_realloc(s->sectors, new_size); 252 s->sectorcounts = g_realloc(s->sectorcounts, new_size); 253 254 for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) { 255 s->types[i] = buff_read_uint32(buffer, offset); 256 if (!dmg_is_known_block_type(s->types[i])) { 257 switch (s->types[i]) { 258 case UDBZ: 259 warn_report_once("dmg-bzip2 module is missing, accessing bzip2 " 260 "compressed blocks will result in I/O errors"); 261 break; 262 case ULFO: 263 warn_report_once("dmg-lzfse module is missing, accessing lzfse " 264 "compressed blocks will result in I/O errors"); 265 break; 266 case UDCM: 267 case UDLE: 268 /* Comments and last entry can be ignored without problems */ 269 break; 270 default: 271 warn_report_once("Image contains chunks of unknown type %x, " 272 "accessing them will result in I/O errors", 273 s->types[i]); 274 break; 275 } 276 chunk_count--; 277 i--; 278 offset += 40; 279 continue; 280 } 281 282 /* sector number */ 283 s->sectors[i] = buff_read_uint64(buffer, offset + 8); 284 s->sectors[i] += out_offset; 285 286 /* sector count */ 287 s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10); 288 289 /* all-zeroes sector (type UDZE and UDIG) does not need to be 290 * "uncompressed" and can therefore be unbounded. */ 291 if (s->types[i] != UDZE && s->types[i] != UDIG 292 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) { 293 error_report("sector count %" PRIu64 " for chunk %" PRIu32 294 " is larger than max (%u)", 295 s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX); 296 ret = -EINVAL; 297 goto fail; 298 } 299 300 /* offset in (compressed) data fork */ 301 s->offsets[i] = buff_read_uint64(buffer, offset + 0x18); 302 s->offsets[i] += in_offset; 303 304 /* length in (compressed) data fork */ 305 s->lengths[i] = buff_read_uint64(buffer, offset + 0x20); 306 307 if (s->lengths[i] > DMG_LENGTHS_MAX) { 308 error_report("length %" PRIu64 " for chunk %" PRIu32 309 " is larger than max (%u)", 310 s->lengths[i], i, DMG_LENGTHS_MAX); 311 ret = -EINVAL; 312 goto fail; 313 } 314 315 update_max_chunk_size(s, i, &ds->max_compressed_size, 316 &ds->max_sectors_per_chunk); 317 offset += 40; 318 } 319 s->n_chunks += chunk_count; 320 return 0; 321 322 fail: 323 return ret; 324 } 325 326 static int GRAPH_RDLOCK 327 dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds, 328 uint64_t info_begin, uint64_t info_length) 329 { 330 BDRVDMGState *s = bs->opaque; 331 int ret; 332 uint32_t count, rsrc_data_offset; 333 uint8_t *buffer = NULL; 334 uint64_t info_end; 335 uint64_t offset; 336 337 /* read offset from begin of resource fork (info_begin) to resource data */ 338 ret = read_uint32(bs, info_begin, &rsrc_data_offset); 339 if (ret < 0) { 340 goto fail; 341 } else if (rsrc_data_offset > info_length) { 342 ret = -EINVAL; 343 goto fail; 344 } 345 346 /* read length of resource data */ 347 ret = read_uint32(bs, info_begin + 8, &count); 348 if (ret < 0) { 349 goto fail; 350 } else if (count == 0 || rsrc_data_offset + count > info_length) { 351 ret = -EINVAL; 352 goto fail; 353 } 354 355 /* begin of resource data (consisting of one or more resources) */ 356 offset = info_begin + rsrc_data_offset; 357 358 /* end of resource data (there is possibly a following resource map 359 * which will be ignored). */ 360 info_end = offset + count; 361 362 /* read offsets (mish blocks) from one or more resources in resource data */ 363 while (offset < info_end) { 364 /* size of following resource */ 365 ret = read_uint32(bs, offset, &count); 366 if (ret < 0) { 367 goto fail; 368 } else if (count == 0 || count > info_end - offset) { 369 ret = -EINVAL; 370 goto fail; 371 } 372 offset += 4; 373 374 buffer = g_realloc(buffer, count); 375 ret = bdrv_pread(bs->file, offset, count, buffer, 0); 376 if (ret < 0) { 377 goto fail; 378 } 379 380 ret = dmg_read_mish_block(s, ds, buffer, count); 381 if (ret < 0) { 382 goto fail; 383 } 384 /* advance offset by size of resource */ 385 offset += count; 386 } 387 ret = 0; 388 389 fail: 390 g_free(buffer); 391 return ret; 392 } 393 394 static int GRAPH_RDLOCK 395 dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds, 396 uint64_t info_begin, uint64_t info_length) 397 { 398 BDRVDMGState *s = bs->opaque; 399 int ret; 400 uint8_t *buffer = NULL; 401 char *data_begin, *data_end; 402 403 /* Have at least some length to avoid NULL for g_malloc. Attempt to set a 404 * safe upper cap on the data length. A test sample had a XML length of 405 * about 1 MiB. */ 406 if (info_length == 0 || info_length > 16 * 1024 * 1024) { 407 ret = -EINVAL; 408 goto fail; 409 } 410 411 buffer = g_malloc(info_length + 1); 412 buffer[info_length] = '\0'; 413 ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0); 414 if (ret < 0) { 415 ret = -EINVAL; 416 goto fail; 417 } 418 419 /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64 420 * decode. The actual data element has 431 (0x1af) bytes which includes tabs 421 * and line feeds. */ 422 data_end = (char *)buffer; 423 while ((data_begin = strstr(data_end, "<data>")) != NULL) { 424 guchar *mish; 425 gsize out_len = 0; 426 427 data_begin += 6; 428 data_end = strstr(data_begin, "</data>"); 429 /* malformed XML? */ 430 if (data_end == NULL) { 431 ret = -EINVAL; 432 goto fail; 433 } 434 *data_end++ = '\0'; 435 mish = g_base64_decode(data_begin, &out_len); 436 ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len); 437 g_free(mish); 438 if (ret < 0) { 439 goto fail; 440 } 441 } 442 ret = 0; 443 444 fail: 445 g_free(buffer); 446 return ret; 447 } 448 449 static int dmg_open(BlockDriverState *bs, QDict *options, int flags, 450 Error **errp) 451 { 452 BDRVDMGState *s = bs->opaque; 453 DmgHeaderState ds; 454 uint64_t rsrc_fork_offset, rsrc_fork_length; 455 uint64_t plist_xml_offset, plist_xml_length; 456 int64_t offset; 457 int ret; 458 459 GLOBAL_STATE_CODE(); 460 461 bdrv_graph_rdlock_main_loop(); 462 ret = bdrv_apply_auto_read_only(bs, NULL, errp); 463 bdrv_graph_rdunlock_main_loop(); 464 if (ret < 0) { 465 return ret; 466 } 467 468 ret = bdrv_open_file_child(NULL, options, "file", bs, errp); 469 if (ret < 0) { 470 return ret; 471 } 472 473 GRAPH_RDLOCK_GUARD_MAINLOOP(); 474 475 /* 476 * NB: if uncompress submodules are absent, 477 * ie block_module_load return value == 0, the function pointers 478 * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL. 479 */ 480 if (block_module_load("dmg-bz2", errp) < 0) { 481 return -EINVAL; 482 } 483 if (block_module_load("dmg-lzfse", errp) < 0) { 484 return -EINVAL; 485 } 486 487 s->n_chunks = 0; 488 s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL; 489 /* used by dmg_read_mish_block to keep track of the current I/O position */ 490 ds.data_fork_offset = 0; 491 ds.max_compressed_size = 1; 492 ds.max_sectors_per_chunk = 1; 493 494 /* locate the UDIF trailer */ 495 offset = dmg_find_koly_offset(bs->file, errp); 496 if (offset < 0) { 497 ret = offset; 498 goto fail; 499 } 500 501 /* offset of data fork (DataForkOffset) */ 502 ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset); 503 if (ret < 0) { 504 goto fail; 505 } else if (ds.data_fork_offset > offset) { 506 ret = -EINVAL; 507 goto fail; 508 } 509 510 /* offset of resource fork (RsrcForkOffset) */ 511 ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset); 512 if (ret < 0) { 513 goto fail; 514 } 515 ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length); 516 if (ret < 0) { 517 goto fail; 518 } 519 if (rsrc_fork_offset >= offset || 520 rsrc_fork_length > offset - rsrc_fork_offset) { 521 ret = -EINVAL; 522 goto fail; 523 } 524 /* offset of property list (XMLOffset) */ 525 ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset); 526 if (ret < 0) { 527 goto fail; 528 } 529 ret = read_uint64(bs, offset + 0xe0, &plist_xml_length); 530 if (ret < 0) { 531 goto fail; 532 } 533 if (plist_xml_offset >= offset || 534 plist_xml_length > offset - plist_xml_offset) { 535 ret = -EINVAL; 536 goto fail; 537 } 538 ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors); 539 if (ret < 0) { 540 goto fail; 541 } 542 if (bs->total_sectors < 0) { 543 ret = -EINVAL; 544 goto fail; 545 } 546 if (rsrc_fork_length != 0) { 547 ret = dmg_read_resource_fork(bs, &ds, 548 rsrc_fork_offset, rsrc_fork_length); 549 if (ret < 0) { 550 goto fail; 551 } 552 } else if (plist_xml_length != 0) { 553 ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length); 554 if (ret < 0) { 555 goto fail; 556 } 557 } else { 558 ret = -EINVAL; 559 goto fail; 560 } 561 562 /* initialize zlib engine */ 563 s->compressed_chunk = qemu_try_blockalign(bs->file->bs, 564 ds.max_compressed_size + 1); 565 s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs, 566 512 * ds.max_sectors_per_chunk); 567 if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) { 568 ret = -ENOMEM; 569 goto fail; 570 } 571 572 if (inflateInit(&s->zstream) != Z_OK) { 573 ret = -EINVAL; 574 goto fail; 575 } 576 577 s->current_chunk = s->n_chunks; 578 579 qemu_co_mutex_init(&s->lock); 580 return 0; 581 582 fail: 583 g_free(s->types); 584 g_free(s->offsets); 585 g_free(s->lengths); 586 g_free(s->sectors); 587 g_free(s->sectorcounts); 588 qemu_vfree(s->compressed_chunk); 589 qemu_vfree(s->uncompressed_chunk); 590 return ret; 591 } 592 593 static void dmg_refresh_limits(BlockDriverState *bs, Error **errp) 594 { 595 bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */ 596 } 597 598 static inline int is_sector_in_chunk(BDRVDMGState *s, 599 uint32_t chunk_num, uint64_t sector_num) 600 { 601 if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num || 602 s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) { 603 return 0; 604 } else { 605 return -1; 606 } 607 } 608 609 static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num) 610 { 611 /* binary search */ 612 uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3; 613 while (chunk1 <= chunk2) { 614 chunk3 = (chunk1 + chunk2) / 2; 615 if (s->sectors[chunk3] > sector_num) { 616 if (chunk3 == 0) { 617 goto err; 618 } 619 chunk2 = chunk3 - 1; 620 } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) { 621 return chunk3; 622 } else { 623 chunk1 = chunk3 + 1; 624 } 625 } 626 err: 627 return s->n_chunks; /* error */ 628 } 629 630 static int coroutine_fn GRAPH_RDLOCK 631 dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) 632 { 633 BDRVDMGState *s = bs->opaque; 634 635 if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) { 636 int ret; 637 uint32_t chunk = search_chunk(s, sector_num); 638 639 if (chunk >= s->n_chunks) { 640 return -1; 641 } 642 643 s->current_chunk = s->n_chunks; 644 switch (s->types[chunk]) { /* block entry type */ 645 case UDZO: { /* zlib compressed */ 646 /* we need to buffer, because only the chunk as whole can be 647 * inflated. */ 648 ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], 649 s->compressed_chunk, 0); 650 if (ret < 0) { 651 return -1; 652 } 653 654 s->zstream.next_in = s->compressed_chunk; 655 s->zstream.avail_in = s->lengths[chunk]; 656 s->zstream.next_out = s->uncompressed_chunk; 657 s->zstream.avail_out = 512 * s->sectorcounts[chunk]; 658 ret = inflateReset(&s->zstream); 659 if (ret != Z_OK) { 660 return -1; 661 } 662 ret = inflate(&s->zstream, Z_FINISH); 663 if (ret != Z_STREAM_END || 664 s->zstream.total_out != 512 * s->sectorcounts[chunk]) { 665 return -1; 666 } 667 break; } 668 case UDBZ: /* bzip2 compressed */ 669 if (!dmg_uncompress_bz2) { 670 break; 671 } 672 /* we need to buffer, because only the chunk as whole can be 673 * inflated. */ 674 ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], 675 s->compressed_chunk, 0); 676 if (ret < 0) { 677 return -1; 678 } 679 680 ret = dmg_uncompress_bz2((char *)s->compressed_chunk, 681 (unsigned int) s->lengths[chunk], 682 (char *)s->uncompressed_chunk, 683 (unsigned int) 684 (512 * s->sectorcounts[chunk])); 685 if (ret < 0) { 686 return ret; 687 } 688 break; 689 case ULFO: 690 if (!dmg_uncompress_lzfse) { 691 break; 692 } 693 /* we need to buffer, because only the chunk as whole can be 694 * inflated. */ 695 ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], 696 s->compressed_chunk, 0); 697 if (ret < 0) { 698 return -1; 699 } 700 701 ret = dmg_uncompress_lzfse((char *)s->compressed_chunk, 702 (unsigned int) s->lengths[chunk], 703 (char *)s->uncompressed_chunk, 704 (unsigned int) 705 (512 * s->sectorcounts[chunk])); 706 if (ret < 0) { 707 return ret; 708 } 709 break; 710 case UDRW: /* copy */ 711 ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], 712 s->uncompressed_chunk, 0); 713 if (ret < 0) { 714 return -1; 715 } 716 break; 717 case UDZE: /* zeros */ 718 case UDIG: /* ignore */ 719 /* see dmg_read, it is treated specially. No buffer needs to be 720 * pre-filled, the zeroes can be set directly. */ 721 break; 722 } 723 s->current_chunk = chunk; 724 } 725 return 0; 726 } 727 728 static int coroutine_fn GRAPH_RDLOCK 729 dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, 730 QEMUIOVector *qiov, BdrvRequestFlags flags) 731 { 732 BDRVDMGState *s = bs->opaque; 733 uint64_t sector_num = offset >> BDRV_SECTOR_BITS; 734 int nb_sectors = bytes >> BDRV_SECTOR_BITS; 735 int ret, i; 736 737 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 738 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 739 740 qemu_co_mutex_lock(&s->lock); 741 742 for (i = 0; i < nb_sectors; i++) { 743 uint32_t sector_offset_in_chunk; 744 void *data; 745 746 if (dmg_read_chunk(bs, sector_num + i) != 0) { 747 ret = -EIO; 748 goto fail; 749 } 750 /* Special case: current chunk is all zeroes. Do not perform a memcpy as 751 * s->uncompressed_chunk may be too small to cover the large all-zeroes 752 * section. dmg_read_chunk is called to find s->current_chunk */ 753 if (s->types[s->current_chunk] == UDZE 754 || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */ 755 qemu_iovec_memset(qiov, i * 512, 0, 512); 756 continue; 757 } 758 sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk]; 759 data = s->uncompressed_chunk + sector_offset_in_chunk * 512; 760 qemu_iovec_from_buf(qiov, i * 512, data, 512); 761 } 762 763 ret = 0; 764 fail: 765 qemu_co_mutex_unlock(&s->lock); 766 return ret; 767 } 768 769 static void dmg_close(BlockDriverState *bs) 770 { 771 BDRVDMGState *s = bs->opaque; 772 773 g_free(s->types); 774 g_free(s->offsets); 775 g_free(s->lengths); 776 g_free(s->sectors); 777 g_free(s->sectorcounts); 778 qemu_vfree(s->compressed_chunk); 779 qemu_vfree(s->uncompressed_chunk); 780 781 inflateEnd(&s->zstream); 782 } 783 784 static BlockDriver bdrv_dmg = { 785 .format_name = "dmg", 786 .instance_size = sizeof(BDRVDMGState), 787 .bdrv_probe = dmg_probe, 788 .bdrv_open = dmg_open, 789 .bdrv_refresh_limits = dmg_refresh_limits, 790 .bdrv_child_perm = bdrv_default_perms, 791 .bdrv_co_preadv = dmg_co_preadv, 792 .bdrv_close = dmg_close, 793 .is_format = true, 794 }; 795 796 static void bdrv_dmg_init(void) 797 { 798 bdrv_register(&bdrv_dmg); 799 } 800 801 block_init(bdrv_dmg_init); 802