/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file->bs, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}
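/* For reference, the on-disk QEDHeader layout that the conversion helpers
 * above marshal (all fields little-endian; the offsets are inferred from the
 * field order and types, see qed.h for the authoritative definition):
 *
 *   0x00  uint32_t magic                    QED_MAGIC
 *   0x04  uint32_t cluster_size             power of 2, in bytes
 *   0x08  uint32_t table_size               L1/L2 table size, in clusters
 *   0x0c  uint32_t header_size              header area size, in clusters
 *   0x10  uint64_t features
 *   0x18  uint64_t compat_features
 *   0x20  uint64_t autoclear_features
 *   0x28  uint64_t l1_table_offset          in bytes
 *   0x30  uint64_t image_size               logical size, in bytes
 *   0x38  uint32_t backing_filename_offset  in bytes
 *   0x3c  uint32_t backing_filename_size    in bytes
 */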
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file->bs, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}
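/* A worked example of the geometry computed by qed_max_image_size(), using
 * the usual QED defaults (cluster_size = 64 KiB and table_size = 4, assumed
 * from QED_DEFAULT_CLUSTER_SIZE and QED_DEFAULT_TABLE_SIZE):
 *
 *   table_entries = (4 * 65536) / sizeof(uint64_t) = 32768
 *   l2_size       = 32768 entries * 64 KiB         = 2 GiB per L2 table
 *   max image     = 2 GiB * 32768 L1 entries       = 64 TiB
 *
 * So each L2 table maps 2 GiB of the image and the L1 table can reference up
 * to 32768 L2 tables.
 */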
/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}
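/* To summarize the callback chain above (a reading aid, not new behavior):
 * once allocating writes have been idle for the timeout, the timer fires and
 * the driver
 *
 *   1. plugs new allocating writes (qed_plug_allocating_write_reqs)
 *   2. flushes so data and metadata reach disk (bdrv_aio_flush)
 *   3. clears QED_F_NEED_CHECK and rewrites the header (qed_clear_need_check)
 *   4. flushes the header write and unplugs allocating writes
 *      (qed_flush_after_clear_need_check)
 *
 * If a new allocating write arrives first, the timer is cancelled and the
 * flag stays set, so a crash at any point still triggers a consistency check
 * on the next open.
 */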
static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Cancel the timer and do the work it would have triggered as if it had
     * fired; that way bdrv_drain() takes care of the ongoing requests
     * correctly.
     */
    qed_cancel_need_check_timer(s);
    qed_plug_allocating_write_reqs(s);
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file->bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        error_setg(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                   bdrv_get_device_or_node_name(bs), "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file->bs, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              bs->backing_file, sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}
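/* A sketch of how the shifts computed in bdrv_qed_open() decompose a byte
 * position, again assuming the default 64 KiB clusters and table_size 4:
 *
 *   table_nelems = (65536 * 4) / 8   = 32768
 *   l2_shift     = ctz32(65536)      = 16
 *   l2_mask      = 32768 - 1         = 0x7fff
 *   l1_shift     = 16 + ctz32(32768) = 31
 *
 * so for a byte position 'pos' within the image:
 *
 *   L1 index          = pos >> 31            (each L1 entry covers 2 GiB)
 *   L2 index          = (pos >> 16) & 0x7fff
 *   offset in cluster = pos & 0xffff
 */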
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    bs = NULL;
    ret = bdrv_open(&bs, filename, NULL, NULL,
                    BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL,
                    &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}
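/* The file qed_create() lays out, before any data clusters are allocated
 * (header_size is 1 and l1_table_offset equals cluster_size):
 *
 *   byte 0                  QEDHeader, little-endian
 *   byte sizeof(QEDHeader)  backing filename, if any (not NUL-terminated)
 *   byte cluster_size       zeroed L1 table, table_size clusters long
 *
 * From the command line this path is reached via, for example:
 *
 *   qemu-img create -f qed -o cluster_size=65536 test.qed 16G
 */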
typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}
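/* Note the bridging pattern used above: qed_find_cluster() reports its result
 * through a callback, while get_block_status runs in a coroutine.  The
 * sentinel BDRV_BLOCK_OFFSET_MASK marks "no answer yet"; if the callback has
 * not fired by the time qed_find_cluster() returns, the coroutine records
 * itself in cb.co and yields, and qed_is_allocated_cb() re-enters it once the
 * status is known.  The same idiom appears again in
 * bdrv_qed_co_write_zeroes() below.
 */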
/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}
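/* For illustration of the straddling case handled in qed_read_backing_file():
 * with a hypothetical 1 MiB backing file and a 4 KiB read at
 * pos = 1 MiB - 512, the whole destination qiov is first zeroed, then size is
 * shortened to MIN(1 MiB - pos, 4096) = 512, so only the last 512 valid
 * backing bytes are read and the remaining 3584 bytes stay zero.
 */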
/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);

    /* Invoke callback */
    cb(user_opaque, ret);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                         qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}
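/* Example of qed_update_l2_table() behavior: linking n = 3 clusters allocated
 * contiguously at byte offset X writes entries {X, X + cluster_size,
 * X + 2 * cluster_size} at index..index+2.  For the special markers the
 * offset is not advanced, so passing the zero cluster marker stores the same
 * marker in all three entries.
 */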
/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}
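/* Reading aid: the metadata update order for an allocating write is
 *
 *   data clusters -> (flush if backing file) -> L2 entry -> L1 entry
 *
 * When the L2 table itself is new (find_cluster_ret == QED_CLUSTER_L1) the
 * whole table is written and the L1 update follows; otherwise only the
 * touched L2 entries are written and the request continues directly.  Either
 * way a table pointer is published only after the data it points to is in
 * place.
 */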
/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    /* The literal 1 is the zero cluster marker; cf.
     * qed_offset_is_zero_cluster().
     */
    qed_aio_write_l2_update(acb, 0, 1);
}
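/* Worked example of the copy-on-write fill around a new cluster, assuming
 * 64 KiB clusters: an allocating write of bytes 4096..8191 within a cluster
 * proceeds as
 *
 *   prefill:  copy bytes 0..4095 of the cluster from the backing file
 *   postfill: copy bytes 8192..65535 from the backing file
 *   main:     write the guest data at bytes 4096..8191
 *
 * qed_aio_write_prefill() computes len = qed_offset_into_cluster() = 4096 and
 * qed_aio_write_postfill() computes len = 65536 - 8192 = 57344, matching the
 * two untouched regions.
 */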
/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              QED_CLUSTER_ZERO, or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}
/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              QED_CLUSTER_ZERO, or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file->bs, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}
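/* Request lifecycle in one place (descriptive only): qed_aio_setup()
 * initializes the QEDAIOCB and enters qed_aio_next_io(), which advances
 * cur_pos, looks up the next run of clusters with qed_find_cluster() and
 * dispatches to qed_aio_read_data() or qed_aio_write_data().  Each data
 * callback finishes by re-entering qed_aio_next_io(), so a request loops
 * cluster run by cluster run until cur_pos reaches end_pos and
 * qed_aio_complete() fires the guest callback from a bottom half.
 */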
typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}
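/* The alignment check in bdrv_qed_co_write_zeroes() in concrete terms: with a
 * backing file and 64 KiB clusters, both the start and the length of the
 * request must be multiples of 128 sectors (65536 / 512).  A sub-cluster zero
 * write returns -ENOTSUP so the generic block layer can fall back to writing
 * explicit zeroes, which goes through the normal copy-on-write path and
 * preserves the backing data around the zeroed range.
 */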
static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file->bs, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_invalidate_cache(bs->file->bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_setg(errp, "Could not reopen qed layer: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
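/* Sketch of the header rewrite in bdrv_qed_change_backing_file(), e.g. for a
 * hypothetical new backing filename "base.img" (8 bytes; the NUL is not
 * stored):
 *
 *   buffer = [ 64-byte QEDHeader | "base.img" ]   buffer_len = 72
 *
 * backing_filename_offset becomes 64 and backing_filename_size becomes 8; the
 * whole buffer must fit within header_size * cluster_size or -ENOSPC is
 * returned.
 */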
static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);