/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"
#include "sysemu/block-backend.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}
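
/*
 * The async header update below relies on the GenericCB helper pattern used
 * elsewhere in the block layer: gencb_alloc() carves out a callback struct
 * whose first member is a GenericCB holding the caller's cb/opaque pair, and
 * gencb_complete() invokes that saved callback and frees the struct.  A
 * minimal usage sketch, assuming those helper semantics:
 *
 *     MyCB *cb_struct = gencb_alloc(sizeof(*cb_struct), cb, opaque);
 *     ... kick off async I/O, stashing state in cb_struct ...
 *     gencb_complete(cb_struct, ret);   // invokes cb(opaque, ret), frees
 */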

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header,
     * update them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
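
/*
 * Worked example, assuming the default geometry from qed.h (64 KiB clusters,
 * table_size of 4 clusters):
 *
 *     table_entries = (4 * 65536) / 8  = 32768 entries per table
 *     l2_size       = 32768 * 65536    = 2 GiB covered per L2 table
 *     max image     = 2 GiB * 32768    = 64 TiB
 *
 * so a fully populated L1 table addresses a 64 TiB image at the defaults.
 */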

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the
 * image file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
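
/*
 * Note that cluster allocation above is purely an in-memory bump of
 * s->file_size: nothing is written to the image file at allocation time.
 * If QEMU crashes mid-request, the file may therefore contain clusters that
 * no table references yet.  That is benign for data integrity, and such
 * inconsistencies are handled by the consistency check that runs when an
 * image marked QED_F_NEED_CHECK is next opened read-write.
 */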

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}
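
/*
 * To summarize the need-check lifecycle implemented above: once allocating
 * writes go idle for QED_NEED_CHECK_TIMEOUT seconds, the timer callback plugs
 * new allocating writes, flushes the data file, rewrites the header with
 * QED_F_NEED_CHECK cleared, issues another flush, and unplugs.  Any failure
 * along the way simply leaves the flag set, which is always safe: the worst
 * case is an unnecessary consistency check on the next open.
 */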

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              bs->backing_file, sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}
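
/*
 * Illustration of the shift/mask fields computed in bdrv_qed_open(), using
 * the default geometry (cluster_size = 64 KiB, table_size = 4):
 *
 *     l2_shift = ctz32(65536)          = 16
 *     table_nelems                     = (65536 * 4) / 8 = 32768
 *     l2_mask  = 32768 - 1             = 0x7fff
 *     l1_shift = 16 + ctz32(32768)     = 31
 *
 * A guest byte offset then decomposes as:
 *
 *     L1 index        = pos >> 31
 *     L2 index        = (pos >> 16) & 0x7fff
 *     byte in cluster = pos & 0xffff
 */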

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_PROTOCOL, &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}
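
/*
 * Resulting on-disk layout of a freshly created image, sketched with the
 * default geometry (illustrative only):
 *
 *     byte 0               QEDHeader (header occupies header_size = 1 cluster)
 *     byte sizeof(header)  optional backing filename, no NUL terminator
 *     byte 65536           L1 table, table_size clusters of zeroes
 *     byte 65536 * 5       first L2/data clusters, allocated on demand at EOF
 *
 * l1_table_offset is set to cluster_size, so the L1 table always begins at
 * cluster 1 regardless of the chosen geometry.
 */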

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset,
                                size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors,
                                                         int *pnum,
                                                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}
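
/*
 * bdrv_qed_co_get_block_status() above uses BDRV_BLOCK_OFFSET_MASK as an
 * "unanswered" sentinel: qed_find_cluster() may complete synchronously (L2
 * cache hit) or asynchronously (table read from disk).  Only in the latter
 * case does cb.status still hold the sentinel, so only then does the
 * coroutine record itself in cb.co and yield until qed_is_allocated_cb()
 * re-enters it.
 */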

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}
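
/*
 * Copy-on-write layout of a newly allocated cluster.  For an unaligned write
 * the untouched head and tail of the cluster must be populated from the
 * backing file (or zeroes):
 *
 *     |<---------------------- cluster ---------------------->|
 *     | prefill (from backing) | payload | postfill (backing)  |
 *
 * Illustrative numbers, assuming 64 KiB clusters: a write of bytes
 * [4096, 8192) into a fresh cluster needs a 4096-byte prefill and a
 * 65536 - 8192 = 57344-byte postfill, performed by qed_aio_write_prefill()
 * and qed_aio_write_postfill() further below via this copy helper.
 */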

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
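
/*
 * Example, assuming 64 KiB clusters: linking n = 3 clusters at index 10 with
 * cluster = 0x100000 stores { 0x100000, 0x110000, 0x120000 } in
 * offsets[10..12].  For the zero and unallocated markers the value is not
 * advanced, so all n entries receive the same marker.
 */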

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_aio_unref(acb);

    /* Invoke callback */
    cb(user_opaque, ret);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            qed_aio_complete_bh, acb);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to
     * the cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}
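
/*
 * Ordering note for the update path above: a brand new L2 table is written
 * out in full before the L1 entry is updated to point at it, while the
 * in-place case only rewrites the touched region of an existing L2 table.
 * Strict on-disk ordering is not enforced with flushes here; instead the
 * QED_F_NEED_CHECK flag, set before the first allocating write, guarantees
 * that a crash is followed by a consistency check on the next open.
 */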

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing
 * a new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}
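
/*
 * The literal 1 passed to qed_aio_write_l2_update() above is the zero cluster
 * marker (see qed_offset_is_zero_cluster() in qed.h): rather than writing a
 * cluster full of zero bytes, the L2 entry itself records that the cluster
 * reads as zeroes, which is why no data cluster is allocated on this path.
 */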

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}
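
/*
 * Note the two-stage gate in qed_aio_write_alloc() above: a request first
 * appends itself to allocating_write_reqs, then proceeds only if it is the
 * queue head and the queue is not plugged.  Parked requests are restarted one
 * at a time from qed_aio_complete(), so allocating writes are fully
 * serialized while in-place writes to already allocated clusters can run
 * concurrently.
 */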

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}
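
/*
 * Lifecycle of a request, as driven by the functions above: qed_aio_setup()
 * initializes the QEDAIOCB and calls qed_aio_next_io(), which looks up the
 * next run of contiguous clusters via qed_find_cluster() and dispatches to
 * qed_aio_read_data() or qed_aio_write_data().  Each completion funnels back
 * into qed_aio_next_io() until cur_pos reaches end_pos, at which point
 * qed_aio_complete() schedules the caller's callback in a bottom half.
 */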

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co);
    }
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                             count >> BDRV_SECTOR_BITS,
                             qed_co_pwrite_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}
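
/*
 * Illustration of the alignment gate above, assuming 64 KiB clusters: a zero
 * request at offset 128 KiB of length 64 KiB covers whole clusters and can be
 * recorded as zero-cluster markers in the L2 table, while a 4 KiB request
 * returns -ENOTSUP so the generic block layer falls back to writing explicit
 * zero buffers.
 */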

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot
     * safely add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);