/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "dmg.h"

int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                          char *next_out, unsigned int avail_out);

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case 0x80000005: /* zlib compressed */
    case 0x80000006: /* bzip2 compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case 1: /* copy */
        uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
        break;
    case 2: /* zero */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the beginning of the UDIF trailer (512 bytes). This magic can be
     * found in the last 511 bytes of the second-last sector or the first 4
     * bytes of the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
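    /* read the tail of the file and scan it for the "koly" magic */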
    ret = bdrv_pread(file, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case 0x00000001: /* uncompressed */
    case 0x00000002: /* zeroes */
    case 0x80000005: /* zlib */
        return true;
    case 0x80000006: /* bzip2 */
        return !!dmg_uncompress_bz2;
    default:
        return false;
    }
}

static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to the beginning of the chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
         * therefore be unbounded. */
        if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from the beginning of the resource fork (info_begin) to
     * the resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* beginning of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had an XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    block_module_load_one("dmg-bz2");
    bs->read_only = true;

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
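    /* clean up any partially-initialized chunk tables and chunk buffers */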
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

static inline int is_sector_in_chunk(BDRVDMGState *s,
                                     uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 != chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            chunk2 = chunk3;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            /* advance past chunk3 so the interval always shrinks; otherwise
             * the loop could spin forever on a sector no chunk covers */
            chunk1 = chunk3 + 1;
        }
    }
    return s->n_chunks; /* error */
}

static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case 0x80000005: { /* zlib compressed */
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case 0x80000006: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * decompressed. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                     (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case 1: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case 2: /* zero */
            /* see dmg_co_preadv, it is treated specially. No buffer needs to
             * be pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

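/* Read entry point: offset and bytes are multiples of the sector size (see
 * dmg_refresh_limits and the asserts below). Each sector is either copied
 * from the currently decoded chunk or zero-filled for an all-zeroes chunk. */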
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);