/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"
#include "sysemu/block-backend.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
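/* Note: the on-disk QED header is little-endian; the two converters above
 * must mirror each other field for field and be kept in sync with the
 * QEDHeader layout whenever a field is added.
 */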
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file->bs, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file->bs, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
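/* Worked example (assuming the defaults from qed.h: 64 KiB clusters and a
 * table_size of 4 clusters): table_entries = 4 * 65536 / 8 = 32768, so one
 * L2 table maps 32768 * 64 KiB = 2 GiB, and the maximum image size is
 * 32768 * 2 GiB = 64 TiB.
 */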
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
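/* Note that the image file itself is not resized here: s->file_size is only
 * in-memory bookkeeping, and the file grows when data is actually written at
 * the returned offset.  A crash between allocation and the L2 table update is
 * why allocating writes set QED_F_NEED_CHECK (see qed_aio_write_alloc below).
 */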
QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}
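/* The timer above implements lazy clearing of QED_F_NEED_CHECK: once
 * allocating writes have been idle for QED_NEED_CHECK_TIMEOUT seconds, new
 * allocating writes are plugged, in-flight data is flushed, the flag is
 * cleared in the header, and requests are unplugged.  This avoids a
 * consistency check on the next open after an unexpected exit during an
 * otherwise idle period.
 */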
/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file->bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
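    /* Worked example (assuming 64 KiB clusters and table_size 4):
     * table_nelems = 32768, l2_shift = 16, l2_mask = 0x7fff and
     * l1_shift = 31, i.e. each L1 entry covers 2 GiB of virtual disk and
     * bits [30:16] of a byte position select the L2 entry.
     */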
    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file->bs, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
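/* A freshly created image has the following layout (a sketch; offsets assume
 * header_size == 1 cluster, which is what qed_create() below writes):
 *
 *   +--------------------+--------------------+--------------+----------
 *   | QEDHeader          | backing filename   | L1 table     | data ...
 *   +--------------------+--------------------+--------------+----------
 *   0                    sizeof(QEDHeader)    cluster_size
 */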
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_PROTOCOL, &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}
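/* The creation options above map onto the qemu-img command line, e.g. this
 * illustrative invocation:
 *
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 image.qed 16G
 */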
typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}
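/* BDRV_BLOCK_OFFSET_MASK doubles as a "not answered yet" sentinel above: it
 * is never a value qed_is_allocated_cb() stores, so the loop yields until the
 * callback has recorded a real status and re-entered the coroutine.
 */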
static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
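/* Example: qed_update_l2_table(s, table, 4, 3, X) with an allocated offset X
 * stores X, X + cluster_size and X + 2 * cluster_size in entries 4..6.  With
 * the zero or unallocated marker instead of X, the marker is repeated
 * unchanged.
 */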
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);

    /* Invoke callback */
    cb(user_opaque, ret);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                         qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}
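/* An allocating write usually covers only part of its new cluster(s).  The
 * regions around the guest data are populated from the backing file (or
 * zeroes if there is none):
 *
 *   |<--- prefill --->|<--- guest data --->|<--- postfill --->|
 *   ^ start of first cluster                ^ end of last cluster
 *
 * qed_aio_write_prefill() runs first, then qed_aio_write_postfill(), and
 * finally qed_aio_write_main() writes the guest data itself.
 */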
/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
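/* Without a backing file, an interrupted allocating write can only leak
 * clusters (data written but the L2 table not yet updated), which is harmless
 * to guest data.  QED_F_NEED_CHECK marks the image so the next open runs
 * qed_check() to reclaim such clusters, instead of paying for a flush on
 * every allocating write.
 */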
static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file->bs, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}
/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}
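/* bdrv_qed_refresh_limits() advertises cluster-sized write_zeroes alignment,
 * so the block layer normally splits zero writes on cluster boundaries before
 * they reach this driver; the -ENOTSUP cases above then make the block layer
 * fall back to writing explicit zero buffers for any unaligned remainder.
 */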
static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}
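/* Growing only bumps image_size in the header; no clusters or tables are
 * allocated here, so the new space simply reads back as unallocated (zeroes
 * or backing file data) until it is written.
 */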
static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file->bs, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);