/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

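/* A sketch of the asynchronous header update implemented by the callback
 * below and qed_write_header():
 *
 *   qed_write_header()
 *     -> bdrv_aio_readv()                read sectors covering the header
 *       -> qed_write_header_read_cb()    patch in the updated header fields
 *         -> bdrv_aio_writev()           write the sectors back out
 *           -> qed_write_header_cb()     free bounce buffer and complete
 */
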
static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

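/* A worked example of the size limit above, assuming the default geometry
 * from qed.h (QED_DEFAULT_CLUSTER_SIZE = 64 KB, QED_DEFAULT_TABLE_SIZE = 4
 * clusters):
 *
 *   table_entries = (4 * 65536) / sizeof(uint64_t) = 32768
 *   l2_size       = 32768 * 65536                  = 2 GB per L2 table
 *   max image     = 2 GB * 32768                   = 64 TB
 */
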
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

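/* The callbacks below clear QED_F_NEED_CHECK once the image has been
 * quiescent long enough.  The sequence, starting from the timer, is:
 *
 *   qed_need_check_timer_cb()                    plug allocating writes, flush
 *     -> qed_clear_need_check()                  clear flag, rewrite header
 *       -> qed_flush_after_clear_need_check()    flush again, unplug writes
 */
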
static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

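/* A sketch of the table geometry computed in bdrv_qed_open() below, again
 * assuming the default 64 KB clusters and table_size of 4:
 *
 *   table_nelems = (65536 * 4) / sizeof(uint64_t) = 32768 entries
 *   l2_shift     = ffs(65536) - 1                 = 16
 *   l2_mask      = 32768 - 1                      = 0x7fff
 *   l1_shift     = 16 + ffs(32768) - 1            = 31 (2 GB per L1 entry)
 */
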
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                  bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_refresh_limits(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;

    return 0;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

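/* qed_create() below lays out a new image as follows (header_size is one
 * cluster and the L1 table immediately follows it):
 *
 *   byte 0:               QEDHeader, little-endian
 *   sizeof(QEDHeader):    backing filename, if any
 *   cluster_size:         L1 table, table_size clusters of zeroes
 */
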
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs;

    ret = bdrv_create_file(filename, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    bs = NULL;
    ret = bdrv_open(&bs, filename, NULL, NULL,
                    BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL, NULL,
                    &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options,
                           Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt, errp);
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

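/* bdrv_qed_co_get_block_status() below drives the callback-based
 * qed_find_cluster() from coroutine context: cb.status is primed with the
 * impossible value BDRV_BLOCK_OFFSET_MASK and the coroutine yields until
 * qed_is_allocated_cb() has stored a real status and re-entered it.
 */
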
static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

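/* Copy-on-write into a freshly allocated cluster chains the helpers above:
 *
 *   qed_copy_from_backing_file()               allocate a bounce buffer
 *     -> qed_read_backing_file()               read (or zero-fill) old data
 *       -> qed_copy_from_backing_file_write()  write it into the new cluster
 *         -> qed_copy_from_backing_file_cb()   free the buffer and complete
 */
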
/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

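/* Metadata updates for an allocating write run bottom-up so that a crash
 * never leaves a table pointing at unwritten data: the data clusters are
 * written first, then the L2 table, and only when a brand new L2 table was
 * allocated, the L1 entry (qed_aio_write_l2_update() chains into
 * qed_aio_write_l1_update() below).
 */
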
/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

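/* acb->find_cluster_ret records how much metadata must be (re)built:
 * QED_CLUSTER_FOUND means the data cluster already exists, QED_CLUSTER_ZERO
 * and QED_CLUSTER_L2 mean only the L2 entry needs updating, and
 * QED_CLUSTER_L1 means a whole new L2 table is required as well.
 */
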
/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1); /* 1 is the zero cluster marker */
}

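/* An allocating write that touches only part of a cluster is assembled from
 * three pieces and written in this order (see qed_aio_write_alloc() below):
 *
 *   |<------------------------ new cluster ------------------------->|
 *   | prefill (backing file) |  guest data  | postfill (backing file) |
 *
 *   qed_aio_write_prefill() -> qed_aio_write_postfill() -> qed_aio_write_main()
 */
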
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

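/* Reads dispatch on the same qed_find_cluster() result codes as writes:
 * QED_CLUSTER_FOUND reads from the image file, QED_CLUSTER_ZERO zero-fills
 * the I/O vector, and L1/L2 misses fall through to the backing file (or
 * zeroes when there is none).
 */
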
/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

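/* bdrv_qed_co_write_zeroes() below reuses the regular write path with
 * QED_AIOCB_ZERO set: unallocated clusters get the zero marker written into
 * the L2 table instead of data, and a real zero-filled buffer is only
 * allocated (in qed_aio_write_inplace()) when an already allocated cluster
 * must be overwritten.
 */
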
typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

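/* Growing the image only requires the header update above: reads beyond the
 * old image size hit unallocated clusters, which read as zeroes (or as
 * backing file data when a backing file is configured).
 */
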
static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);

    bdrv_invalidate_cache(bs->file);

    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, NULL, bs->open_flags, NULL);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

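/* An example invocation exercising the options above (the image name is
 * hypothetical):
 *
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 disk.qed 10G
 */
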
static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);