1 /* 2 * QEMU Enhanced Disk Format 3 * 4 * Copyright IBM, Corp. 2010 5 * 6 * Authors: 7 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> 8 * Anthony Liguori <aliguori@us.ibm.com> 9 * 10 * This work is licensed under the terms of the GNU LGPL, version 2 or later. 11 * See the COPYING.LIB file in the top-level directory. 12 * 13 */ 14 15 #include "qemu/osdep.h" 16 #include "block/qdict.h" 17 #include "qapi/error.h" 18 #include "qemu/timer.h" 19 #include "qemu/bswap.h" 20 #include "qemu/option.h" 21 #include "trace.h" 22 #include "qed.h" 23 #include "sysemu/block-backend.h" 24 #include "qapi/qmp/qdict.h" 25 #include "qapi/qobject-input-visitor.h" 26 #include "qapi/qapi-visit-block-core.h" 27 28 static QemuOptsList qed_create_opts; 29 30 static int bdrv_qed_probe(const uint8_t *buf, int buf_size, 31 const char *filename) 32 { 33 const QEDHeader *header = (const QEDHeader *)buf; 34 35 if (buf_size < sizeof(*header)) { 36 return 0; 37 } 38 if (le32_to_cpu(header->magic) != QED_MAGIC) { 39 return 0; 40 } 41 return 100; 42 } 43 44 /** 45 * Check whether an image format is raw 46 * 47 * @fmt: Backing file format, may be NULL 48 */ 49 static bool qed_fmt_is_raw(const char *fmt) 50 { 51 return fmt && strcmp(fmt, "raw") == 0; 52 } 53 54 static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu) 55 { 56 cpu->magic = le32_to_cpu(le->magic); 57 cpu->cluster_size = le32_to_cpu(le->cluster_size); 58 cpu->table_size = le32_to_cpu(le->table_size); 59 cpu->header_size = le32_to_cpu(le->header_size); 60 cpu->features = le64_to_cpu(le->features); 61 cpu->compat_features = le64_to_cpu(le->compat_features); 62 cpu->autoclear_features = le64_to_cpu(le->autoclear_features); 63 cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset); 64 cpu->image_size = le64_to_cpu(le->image_size); 65 cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset); 66 cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size); 67 } 68 69 static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le) 70 { 71 le->magic = cpu_to_le32(cpu->magic); 72 le->cluster_size = cpu_to_le32(cpu->cluster_size); 73 le->table_size = cpu_to_le32(cpu->table_size); 74 le->header_size = cpu_to_le32(cpu->header_size); 75 le->features = cpu_to_le64(cpu->features); 76 le->compat_features = cpu_to_le64(cpu->compat_features); 77 le->autoclear_features = cpu_to_le64(cpu->autoclear_features); 78 le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset); 79 le->image_size = cpu_to_le64(cpu->image_size); 80 le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset); 81 le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size); 82 } 83 84 int qed_write_header_sync(BDRVQEDState *s) 85 { 86 QEDHeader le; 87 int ret; 88 89 qed_header_cpu_to_le(&s->header, &le); 90 ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le)); 91 if (ret != sizeof(le)) { 92 return ret; 93 } 94 return 0; 95 } 96 97 /** 98 * Update header in-place (does not rewrite backing filename or other strings) 99 * 100 * This function only updates known header fields in-place and does not affect 101 * extra data after the QED header. 102 * 103 * No new allocating reqs can start while this function runs. 104 */ 105 static int coroutine_fn qed_write_header(BDRVQEDState *s) 106 { 107 /* We must write full sectors for O_DIRECT but cannot necessarily generate 108 * the data following the header if an unrecognized compat feature is 109 * active. Therefore, first read the sectors containing the header, update 110 * them, and write back. 111 */ 112 113 int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE); 114 size_t len = nsectors * BDRV_SECTOR_SIZE; 115 uint8_t *buf; 116 QEMUIOVector qiov; 117 int ret; 118 119 assert(s->allocating_acb || s->allocating_write_reqs_plugged); 120 121 buf = qemu_blockalign(s->bs, len); 122 qemu_iovec_init_buf(&qiov, buf, len); 123 124 ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0); 125 if (ret < 0) { 126 goto out; 127 } 128 129 /* Update header */ 130 qed_header_cpu_to_le(&s->header, (QEDHeader *) buf); 131 132 ret = bdrv_co_pwritev(s->bs->file, 0, qiov.size, &qiov, 0); 133 if (ret < 0) { 134 goto out; 135 } 136 137 ret = 0; 138 out: 139 qemu_vfree(buf); 140 return ret; 141 } 142 143 static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size) 144 { 145 uint64_t table_entries; 146 uint64_t l2_size; 147 148 table_entries = (table_size * cluster_size) / sizeof(uint64_t); 149 l2_size = table_entries * cluster_size; 150 151 return l2_size * table_entries; 152 } 153 154 static bool qed_is_cluster_size_valid(uint32_t cluster_size) 155 { 156 if (cluster_size < QED_MIN_CLUSTER_SIZE || 157 cluster_size > QED_MAX_CLUSTER_SIZE) { 158 return false; 159 } 160 if (cluster_size & (cluster_size - 1)) { 161 return false; /* not power of 2 */ 162 } 163 return true; 164 } 165 166 static bool qed_is_table_size_valid(uint32_t table_size) 167 { 168 if (table_size < QED_MIN_TABLE_SIZE || 169 table_size > QED_MAX_TABLE_SIZE) { 170 return false; 171 } 172 if (table_size & (table_size - 1)) { 173 return false; /* not power of 2 */ 174 } 175 return true; 176 } 177 178 static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size, 179 uint32_t table_size) 180 { 181 if (image_size % BDRV_SECTOR_SIZE != 0) { 182 return false; /* not multiple of sector size */ 183 } 184 if (image_size > qed_max_image_size(cluster_size, table_size)) { 185 return false; /* image is too large */ 186 } 187 return true; 188 } 189 190 /** 191 * Read a string of known length from the image file 192 * 193 * @file: Image file 194 * @offset: File offset to start of string, in bytes 195 * @n: String length in bytes 196 * @buf: Destination buffer 197 * @buflen: Destination buffer length in bytes 198 * @ret: 0 on success, -errno on failure 199 * 200 * The string is NUL-terminated. 201 */ 202 static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n, 203 char *buf, size_t buflen) 204 { 205 int ret; 206 if (n >= buflen) { 207 return -EINVAL; 208 } 209 ret = bdrv_pread(file, offset, buf, n); 210 if (ret < 0) { 211 return ret; 212 } 213 buf[n] = '\0'; 214 return 0; 215 } 216 217 /** 218 * Allocate new clusters 219 * 220 * @s: QED state 221 * @n: Number of contiguous clusters to allocate 222 * @ret: Offset of first allocated cluster 223 * 224 * This function only produces the offset where the new clusters should be 225 * written. It updates BDRVQEDState but does not make any changes to the image 226 * file. 227 * 228 * Called with table_lock held. 229 */ 230 static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n) 231 { 232 uint64_t offset = s->file_size; 233 s->file_size += n * s->header.cluster_size; 234 return offset; 235 } 236 237 QEDTable *qed_alloc_table(BDRVQEDState *s) 238 { 239 /* Honor O_DIRECT memory alignment requirements */ 240 return qemu_blockalign(s->bs, 241 s->header.cluster_size * s->header.table_size); 242 } 243 244 /** 245 * Allocate a new zeroed L2 table 246 * 247 * Called with table_lock held. 248 */ 249 static CachedL2Table *qed_new_l2_table(BDRVQEDState *s) 250 { 251 CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache); 252 253 l2_table->table = qed_alloc_table(s); 254 l2_table->offset = qed_alloc_clusters(s, s->header.table_size); 255 256 memset(l2_table->table->offsets, 0, 257 s->header.cluster_size * s->header.table_size); 258 return l2_table; 259 } 260 261 static bool qed_plug_allocating_write_reqs(BDRVQEDState *s) 262 { 263 qemu_co_mutex_lock(&s->table_lock); 264 265 /* No reentrancy is allowed. */ 266 assert(!s->allocating_write_reqs_plugged); 267 if (s->allocating_acb != NULL) { 268 /* Another allocating write came concurrently. This cannot happen 269 * from bdrv_qed_co_drain_begin, but it can happen when the timer runs. 270 */ 271 qemu_co_mutex_unlock(&s->table_lock); 272 return false; 273 } 274 275 s->allocating_write_reqs_plugged = true; 276 qemu_co_mutex_unlock(&s->table_lock); 277 return true; 278 } 279 280 static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) 281 { 282 qemu_co_mutex_lock(&s->table_lock); 283 assert(s->allocating_write_reqs_plugged); 284 s->allocating_write_reqs_plugged = false; 285 qemu_co_queue_next(&s->allocating_write_reqs); 286 qemu_co_mutex_unlock(&s->table_lock); 287 } 288 289 static void coroutine_fn qed_need_check_timer_entry(void *opaque) 290 { 291 BDRVQEDState *s = opaque; 292 int ret; 293 294 trace_qed_need_check_timer_cb(s); 295 296 if (!qed_plug_allocating_write_reqs(s)) { 297 return; 298 } 299 300 /* Ensure writes are on disk before clearing flag */ 301 ret = bdrv_co_flush(s->bs->file->bs); 302 if (ret < 0) { 303 qed_unplug_allocating_write_reqs(s); 304 return; 305 } 306 307 s->header.features &= ~QED_F_NEED_CHECK; 308 ret = qed_write_header(s); 309 (void) ret; 310 311 qed_unplug_allocating_write_reqs(s); 312 313 ret = bdrv_co_flush(s->bs); 314 (void) ret; 315 } 316 317 static void qed_need_check_timer_cb(void *opaque) 318 { 319 Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque); 320 qemu_coroutine_enter(co); 321 } 322 323 static void qed_start_need_check_timer(BDRVQEDState *s) 324 { 325 trace_qed_start_need_check_timer(s); 326 327 /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended for 328 * migration. 329 */ 330 timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 331 NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT); 332 } 333 334 /* It's okay to call this multiple times or when no timer is started */ 335 static void qed_cancel_need_check_timer(BDRVQEDState *s) 336 { 337 trace_qed_cancel_need_check_timer(s); 338 timer_del(s->need_check_timer); 339 } 340 341 static void bdrv_qed_detach_aio_context(BlockDriverState *bs) 342 { 343 BDRVQEDState *s = bs->opaque; 344 345 qed_cancel_need_check_timer(s); 346 timer_free(s->need_check_timer); 347 } 348 349 static void bdrv_qed_attach_aio_context(BlockDriverState *bs, 350 AioContext *new_context) 351 { 352 BDRVQEDState *s = bs->opaque; 353 354 s->need_check_timer = aio_timer_new(new_context, 355 QEMU_CLOCK_VIRTUAL, SCALE_NS, 356 qed_need_check_timer_cb, s); 357 if (s->header.features & QED_F_NEED_CHECK) { 358 qed_start_need_check_timer(s); 359 } 360 } 361 362 static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs) 363 { 364 BDRVQEDState *s = bs->opaque; 365 366 /* Fire the timer immediately in order to start doing I/O as soon as the 367 * header is flushed. 368 */ 369 if (s->need_check_timer && timer_pending(s->need_check_timer)) { 370 qed_cancel_need_check_timer(s); 371 qed_need_check_timer_entry(s); 372 } 373 } 374 375 static void bdrv_qed_init_state(BlockDriverState *bs) 376 { 377 BDRVQEDState *s = bs->opaque; 378 379 memset(s, 0, sizeof(BDRVQEDState)); 380 s->bs = bs; 381 qemu_co_mutex_init(&s->table_lock); 382 qemu_co_queue_init(&s->allocating_write_reqs); 383 } 384 385 /* Called with table_lock held. */ 386 static int coroutine_fn bdrv_qed_do_open(BlockDriverState *bs, QDict *options, 387 int flags, Error **errp) 388 { 389 BDRVQEDState *s = bs->opaque; 390 QEDHeader le_header; 391 int64_t file_size; 392 int ret; 393 394 ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header)); 395 if (ret < 0) { 396 return ret; 397 } 398 qed_header_le_to_cpu(&le_header, &s->header); 399 400 if (s->header.magic != QED_MAGIC) { 401 error_setg(errp, "Image not in QED format"); 402 return -EINVAL; 403 } 404 if (s->header.features & ~QED_FEATURE_MASK) { 405 /* image uses unsupported feature bits */ 406 error_setg(errp, "Unsupported QED features: %" PRIx64, 407 s->header.features & ~QED_FEATURE_MASK); 408 return -ENOTSUP; 409 } 410 if (!qed_is_cluster_size_valid(s->header.cluster_size)) { 411 return -EINVAL; 412 } 413 414 /* Round down file size to the last cluster */ 415 file_size = bdrv_getlength(bs->file->bs); 416 if (file_size < 0) { 417 return file_size; 418 } 419 s->file_size = qed_start_of_cluster(s, file_size); 420 421 if (!qed_is_table_size_valid(s->header.table_size)) { 422 return -EINVAL; 423 } 424 if (!qed_is_image_size_valid(s->header.image_size, 425 s->header.cluster_size, 426 s->header.table_size)) { 427 return -EINVAL; 428 } 429 if (!qed_check_table_offset(s, s->header.l1_table_offset)) { 430 return -EINVAL; 431 } 432 433 s->table_nelems = (s->header.cluster_size * s->header.table_size) / 434 sizeof(uint64_t); 435 s->l2_shift = ctz32(s->header.cluster_size); 436 s->l2_mask = s->table_nelems - 1; 437 s->l1_shift = s->l2_shift + ctz32(s->table_nelems); 438 439 /* Header size calculation must not overflow uint32_t */ 440 if (s->header.header_size > UINT32_MAX / s->header.cluster_size) { 441 return -EINVAL; 442 } 443 444 if ((s->header.features & QED_F_BACKING_FILE)) { 445 if ((uint64_t)s->header.backing_filename_offset + 446 s->header.backing_filename_size > 447 s->header.cluster_size * s->header.header_size) { 448 return -EINVAL; 449 } 450 451 ret = qed_read_string(bs->file, s->header.backing_filename_offset, 452 s->header.backing_filename_size, bs->backing_file, 453 sizeof(bs->backing_file)); 454 if (ret < 0) { 455 return ret; 456 } 457 458 if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) { 459 pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw"); 460 } 461 } 462 463 /* Reset unknown autoclear feature bits. This is a backwards 464 * compatibility mechanism that allows images to be opened by older 465 * programs, which "knock out" unknown feature bits. When an image is 466 * opened by a newer program again it can detect that the autoclear 467 * feature is no longer valid. 468 */ 469 if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 && 470 !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) { 471 s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK; 472 473 ret = qed_write_header_sync(s); 474 if (ret) { 475 return ret; 476 } 477 478 /* From here on only known autoclear feature bits are valid */ 479 bdrv_flush(bs->file->bs); 480 } 481 482 s->l1_table = qed_alloc_table(s); 483 qed_init_l2_cache(&s->l2_cache); 484 485 ret = qed_read_l1_table_sync(s); 486 if (ret) { 487 goto out; 488 } 489 490 /* If image was not closed cleanly, check consistency */ 491 if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) { 492 /* Read-only images cannot be fixed. There is no risk of corruption 493 * since write operations are not possible. Therefore, allow 494 * potentially inconsistent images to be opened read-only. This can 495 * aid data recovery from an otherwise inconsistent image. 496 */ 497 if (!bdrv_is_read_only(bs->file->bs) && 498 !(flags & BDRV_O_INACTIVE)) { 499 BdrvCheckResult result = {0}; 500 501 ret = qed_check(s, &result, true); 502 if (ret) { 503 goto out; 504 } 505 } 506 } 507 508 bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs)); 509 510 out: 511 if (ret) { 512 qed_free_l2_cache(&s->l2_cache); 513 qemu_vfree(s->l1_table); 514 } 515 return ret; 516 } 517 518 typedef struct QEDOpenCo { 519 BlockDriverState *bs; 520 QDict *options; 521 int flags; 522 Error **errp; 523 int ret; 524 } QEDOpenCo; 525 526 static void coroutine_fn bdrv_qed_open_entry(void *opaque) 527 { 528 QEDOpenCo *qoc = opaque; 529 BDRVQEDState *s = qoc->bs->opaque; 530 531 qemu_co_mutex_lock(&s->table_lock); 532 qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); 533 qemu_co_mutex_unlock(&s->table_lock); 534 } 535 536 static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags, 537 Error **errp) 538 { 539 QEDOpenCo qoc = { 540 .bs = bs, 541 .options = options, 542 .flags = flags, 543 .errp = errp, 544 .ret = -EINPROGRESS 545 }; 546 547 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 548 false, errp); 549 if (!bs->file) { 550 return -EINVAL; 551 } 552 553 bdrv_qed_init_state(bs); 554 if (qemu_in_coroutine()) { 555 bdrv_qed_open_entry(&qoc); 556 } else { 557 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 558 qemu_coroutine_enter(qemu_coroutine_create(bdrv_qed_open_entry, &qoc)); 559 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); 560 } 561 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); 562 return qoc.ret; 563 } 564 565 static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp) 566 { 567 BDRVQEDState *s = bs->opaque; 568 569 bs->bl.pwrite_zeroes_alignment = s->header.cluster_size; 570 } 571 572 /* We have nothing to do for QED reopen, stubs just return 573 * success */ 574 static int bdrv_qed_reopen_prepare(BDRVReopenState *state, 575 BlockReopenQueue *queue, Error **errp) 576 { 577 return 0; 578 } 579 580 static void bdrv_qed_close(BlockDriverState *bs) 581 { 582 BDRVQEDState *s = bs->opaque; 583 584 bdrv_qed_detach_aio_context(bs); 585 586 /* Ensure writes reach stable storage */ 587 bdrv_flush(bs->file->bs); 588 589 /* Clean shutdown, no check required on next open */ 590 if (s->header.features & QED_F_NEED_CHECK) { 591 s->header.features &= ~QED_F_NEED_CHECK; 592 qed_write_header_sync(s); 593 } 594 595 qed_free_l2_cache(&s->l2_cache); 596 qemu_vfree(s->l1_table); 597 } 598 599 static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, 600 Error **errp) 601 { 602 BlockdevCreateOptionsQed *qed_opts; 603 BlockBackend *blk = NULL; 604 BlockDriverState *bs = NULL; 605 606 QEDHeader header; 607 QEDHeader le_header; 608 uint8_t *l1_table = NULL; 609 size_t l1_size; 610 int ret = 0; 611 612 assert(opts->driver == BLOCKDEV_DRIVER_QED); 613 qed_opts = &opts->u.qed; 614 615 /* Validate options and set default values */ 616 if (!qed_opts->has_cluster_size) { 617 qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE; 618 } 619 if (!qed_opts->has_table_size) { 620 qed_opts->table_size = QED_DEFAULT_TABLE_SIZE; 621 } 622 623 if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) { 624 error_setg(errp, "QED cluster size must be within range [%u, %u] " 625 "and power of 2", 626 QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE); 627 return -EINVAL; 628 } 629 if (!qed_is_table_size_valid(qed_opts->table_size)) { 630 error_setg(errp, "QED table size must be within range [%u, %u] " 631 "and power of 2", 632 QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE); 633 return -EINVAL; 634 } 635 if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size, 636 qed_opts->table_size)) 637 { 638 error_setg(errp, "QED image size must be a non-zero multiple of " 639 "cluster size and less than %" PRIu64 " bytes", 640 qed_max_image_size(qed_opts->cluster_size, 641 qed_opts->table_size)); 642 return -EINVAL; 643 } 644 645 /* Create BlockBackend to write to the image */ 646 bs = bdrv_open_blockdev_ref(qed_opts->file, errp); 647 if (bs == NULL) { 648 return -EIO; 649 } 650 651 blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); 652 ret = blk_insert_bs(blk, bs, errp); 653 if (ret < 0) { 654 goto out; 655 } 656 blk_set_allow_write_beyond_eof(blk, true); 657 658 /* Prepare image format */ 659 header = (QEDHeader) { 660 .magic = QED_MAGIC, 661 .cluster_size = qed_opts->cluster_size, 662 .table_size = qed_opts->table_size, 663 .header_size = 1, 664 .features = 0, 665 .compat_features = 0, 666 .l1_table_offset = qed_opts->cluster_size, 667 .image_size = qed_opts->size, 668 }; 669 670 l1_size = header.cluster_size * header.table_size; 671 672 /* File must start empty and grow, check truncate is supported */ 673 ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp); 674 if (ret < 0) { 675 goto out; 676 } 677 678 if (qed_opts->has_backing_file) { 679 header.features |= QED_F_BACKING_FILE; 680 header.backing_filename_offset = sizeof(le_header); 681 header.backing_filename_size = strlen(qed_opts->backing_file); 682 683 if (qed_opts->has_backing_fmt) { 684 const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt); 685 if (qed_fmt_is_raw(backing_fmt)) { 686 header.features |= QED_F_BACKING_FORMAT_NO_PROBE; 687 } 688 } 689 } 690 691 qed_header_cpu_to_le(&header, &le_header); 692 ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0); 693 if (ret < 0) { 694 goto out; 695 } 696 ret = blk_pwrite(blk, sizeof(le_header), qed_opts->backing_file, 697 header.backing_filename_size, 0); 698 if (ret < 0) { 699 goto out; 700 } 701 702 l1_table = g_malloc0(l1_size); 703 ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0); 704 if (ret < 0) { 705 goto out; 706 } 707 708 ret = 0; /* success */ 709 out: 710 g_free(l1_table); 711 blk_unref(blk); 712 bdrv_unref(bs); 713 return ret; 714 } 715 716 static int coroutine_fn bdrv_qed_co_create_opts(const char *filename, 717 QemuOpts *opts, 718 Error **errp) 719 { 720 BlockdevCreateOptions *create_options = NULL; 721 QDict *qdict; 722 Visitor *v; 723 BlockDriverState *bs = NULL; 724 Error *local_err = NULL; 725 int ret; 726 727 static const QDictRenames opt_renames[] = { 728 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 729 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 730 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 731 { BLOCK_OPT_TABLE_SIZE, "table-size" }, 732 { NULL, NULL }, 733 }; 734 735 /* Parse options and convert legacy syntax */ 736 qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qed_create_opts, true); 737 738 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 739 ret = -EINVAL; 740 goto fail; 741 } 742 743 /* Create and open the file (protocol layer) */ 744 ret = bdrv_create_file(filename, opts, &local_err); 745 if (ret < 0) { 746 error_propagate(errp, local_err); 747 goto fail; 748 } 749 750 bs = bdrv_open(filename, NULL, NULL, 751 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 752 if (bs == NULL) { 753 ret = -EIO; 754 goto fail; 755 } 756 757 /* Now get the QAPI type BlockdevCreateOptions */ 758 qdict_put_str(qdict, "driver", "qed"); 759 qdict_put_str(qdict, "file", bs->node_name); 760 761 v = qobject_input_visitor_new_flat_confused(qdict, errp); 762 if (!v) { 763 ret = -EINVAL; 764 goto fail; 765 } 766 767 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); 768 visit_free(v); 769 770 if (local_err) { 771 error_propagate(errp, local_err); 772 ret = -EINVAL; 773 goto fail; 774 } 775 776 /* Silently round up size */ 777 assert(create_options->driver == BLOCKDEV_DRIVER_QED); 778 create_options->u.qed.size = 779 ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE); 780 781 /* Create the qed image (format layer) */ 782 ret = bdrv_qed_co_create(create_options, errp); 783 784 fail: 785 qobject_unref(qdict); 786 bdrv_unref(bs); 787 qapi_free_BlockdevCreateOptions(create_options); 788 return ret; 789 } 790 791 static int coroutine_fn bdrv_qed_co_block_status(BlockDriverState *bs, 792 bool want_zero, 793 int64_t pos, int64_t bytes, 794 int64_t *pnum, int64_t *map, 795 BlockDriverState **file) 796 { 797 BDRVQEDState *s = bs->opaque; 798 size_t len = MIN(bytes, SIZE_MAX); 799 int status; 800 QEDRequest request = { .l2_table = NULL }; 801 uint64_t offset; 802 int ret; 803 804 qemu_co_mutex_lock(&s->table_lock); 805 ret = qed_find_cluster(s, &request, pos, &len, &offset); 806 807 *pnum = len; 808 switch (ret) { 809 case QED_CLUSTER_FOUND: 810 *map = offset | qed_offset_into_cluster(s, pos); 811 status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; 812 *file = bs->file->bs; 813 break; 814 case QED_CLUSTER_ZERO: 815 status = BDRV_BLOCK_ZERO; 816 break; 817 case QED_CLUSTER_L2: 818 case QED_CLUSTER_L1: 819 status = 0; 820 break; 821 default: 822 assert(ret < 0); 823 status = ret; 824 break; 825 } 826 827 qed_unref_l2_cache_entry(request.l2_table); 828 qemu_co_mutex_unlock(&s->table_lock); 829 830 return status; 831 } 832 833 static BDRVQEDState *acb_to_s(QEDAIOCB *acb) 834 { 835 return acb->bs->opaque; 836 } 837 838 /** 839 * Read from the backing file or zero-fill if no backing file 840 * 841 * @s: QED state 842 * @pos: Byte position in device 843 * @qiov: Destination I/O vector 844 * @backing_qiov: Possibly shortened copy of qiov, to be allocated here 845 * @cb: Completion function 846 * @opaque: User data for completion function 847 * 848 * This function reads qiov->size bytes starting at pos from the backing file. 849 * If there is no backing file then zeroes are read. 850 */ 851 static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos, 852 QEMUIOVector *qiov, 853 QEMUIOVector **backing_qiov) 854 { 855 uint64_t backing_length = 0; 856 size_t size; 857 int ret; 858 859 /* If there is a backing file, get its length. Treat the absence of a 860 * backing file like a zero length backing file. 861 */ 862 if (s->bs->backing) { 863 int64_t l = bdrv_getlength(s->bs->backing->bs); 864 if (l < 0) { 865 return l; 866 } 867 backing_length = l; 868 } 869 870 /* Zero all sectors if reading beyond the end of the backing file */ 871 if (pos >= backing_length || 872 pos + qiov->size > backing_length) { 873 qemu_iovec_memset(qiov, 0, 0, qiov->size); 874 } 875 876 /* Complete now if there are no backing file sectors to read */ 877 if (pos >= backing_length) { 878 return 0; 879 } 880 881 /* If the read straddles the end of the backing file, shorten it */ 882 size = MIN((uint64_t)backing_length - pos, qiov->size); 883 884 assert(*backing_qiov == NULL); 885 *backing_qiov = g_new(QEMUIOVector, 1); 886 qemu_iovec_init(*backing_qiov, qiov->niov); 887 qemu_iovec_concat(*backing_qiov, qiov, 0, size); 888 889 BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO); 890 ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0); 891 if (ret < 0) { 892 return ret; 893 } 894 return 0; 895 } 896 897 /** 898 * Copy data from backing file into the image 899 * 900 * @s: QED state 901 * @pos: Byte position in device 902 * @len: Number of bytes 903 * @offset: Byte offset in image file 904 */ 905 static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s, 906 uint64_t pos, uint64_t len, 907 uint64_t offset) 908 { 909 QEMUIOVector qiov; 910 QEMUIOVector *backing_qiov = NULL; 911 int ret; 912 913 /* Skip copy entirely if there is no work to do */ 914 if (len == 0) { 915 return 0; 916 } 917 918 qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len); 919 920 ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov); 921 922 if (backing_qiov) { 923 qemu_iovec_destroy(backing_qiov); 924 g_free(backing_qiov); 925 backing_qiov = NULL; 926 } 927 928 if (ret) { 929 goto out; 930 } 931 932 BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE); 933 ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0); 934 if (ret < 0) { 935 goto out; 936 } 937 ret = 0; 938 out: 939 qemu_vfree(qemu_iovec_buf(&qiov)); 940 return ret; 941 } 942 943 /** 944 * Link one or more contiguous clusters into a table 945 * 946 * @s: QED state 947 * @table: L2 table 948 * @index: First cluster index 949 * @n: Number of contiguous clusters 950 * @cluster: First cluster offset 951 * 952 * The cluster offset may be an allocated byte offset in the image file, the 953 * zero cluster marker, or the unallocated cluster marker. 954 * 955 * Called with table_lock held. 956 */ 957 static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table, 958 int index, unsigned int n, 959 uint64_t cluster) 960 { 961 int i; 962 for (i = index; i < index + n; i++) { 963 table->offsets[i] = cluster; 964 if (!qed_offset_is_unalloc_cluster(cluster) && 965 !qed_offset_is_zero_cluster(cluster)) { 966 cluster += s->header.cluster_size; 967 } 968 } 969 } 970 971 /* Called with table_lock held. */ 972 static void coroutine_fn qed_aio_complete(QEDAIOCB *acb) 973 { 974 BDRVQEDState *s = acb_to_s(acb); 975 976 /* Free resources */ 977 qemu_iovec_destroy(&acb->cur_qiov); 978 qed_unref_l2_cache_entry(acb->request.l2_table); 979 980 /* Free the buffer we may have allocated for zero writes */ 981 if (acb->flags & QED_AIOCB_ZERO) { 982 qemu_vfree(acb->qiov->iov[0].iov_base); 983 acb->qiov->iov[0].iov_base = NULL; 984 } 985 986 /* Start next allocating write request waiting behind this one. Note that 987 * requests enqueue themselves when they first hit an unallocated cluster 988 * but they wait until the entire request is finished before waking up the 989 * next request in the queue. This ensures that we don't cycle through 990 * requests multiple times but rather finish one at a time completely. 991 */ 992 if (acb == s->allocating_acb) { 993 s->allocating_acb = NULL; 994 if (!qemu_co_queue_empty(&s->allocating_write_reqs)) { 995 qemu_co_queue_next(&s->allocating_write_reqs); 996 } else if (s->header.features & QED_F_NEED_CHECK) { 997 qed_start_need_check_timer(s); 998 } 999 } 1000 } 1001 1002 /** 1003 * Update L1 table with new L2 table offset and write it out 1004 * 1005 * Called with table_lock held. 1006 */ 1007 static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb) 1008 { 1009 BDRVQEDState *s = acb_to_s(acb); 1010 CachedL2Table *l2_table = acb->request.l2_table; 1011 uint64_t l2_offset = l2_table->offset; 1012 int index, ret; 1013 1014 index = qed_l1_index(s, acb->cur_pos); 1015 s->l1_table->offsets[index] = l2_table->offset; 1016 1017 ret = qed_write_l1_table(s, index, 1); 1018 1019 /* Commit the current L2 table to the cache */ 1020 qed_commit_l2_cache_entry(&s->l2_cache, l2_table); 1021 1022 /* This is guaranteed to succeed because we just committed the entry to the 1023 * cache. 1024 */ 1025 acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset); 1026 assert(acb->request.l2_table != NULL); 1027 1028 return ret; 1029 } 1030 1031 1032 /** 1033 * Update L2 table with new cluster offsets and write them out 1034 * 1035 * Called with table_lock held. 1036 */ 1037 static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset) 1038 { 1039 BDRVQEDState *s = acb_to_s(acb); 1040 bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1; 1041 int index, ret; 1042 1043 if (need_alloc) { 1044 qed_unref_l2_cache_entry(acb->request.l2_table); 1045 acb->request.l2_table = qed_new_l2_table(s); 1046 } 1047 1048 index = qed_l2_index(s, acb->cur_pos); 1049 qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters, 1050 offset); 1051 1052 if (need_alloc) { 1053 /* Write out the whole new L2 table */ 1054 ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true); 1055 if (ret) { 1056 return ret; 1057 } 1058 return qed_aio_write_l1_update(acb); 1059 } else { 1060 /* Write out only the updated part of the L2 table */ 1061 ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, 1062 false); 1063 if (ret) { 1064 return ret; 1065 } 1066 } 1067 return 0; 1068 } 1069 1070 /** 1071 * Write data to the image file 1072 * 1073 * Called with table_lock *not* held. 1074 */ 1075 static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb) 1076 { 1077 BDRVQEDState *s = acb_to_s(acb); 1078 uint64_t offset = acb->cur_cluster + 1079 qed_offset_into_cluster(s, acb->cur_pos); 1080 1081 trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size); 1082 1083 BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO); 1084 return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size, 1085 &acb->cur_qiov, 0); 1086 } 1087 1088 /** 1089 * Populate untouched regions of new data cluster 1090 * 1091 * Called with table_lock held. 1092 */ 1093 static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb) 1094 { 1095 BDRVQEDState *s = acb_to_s(acb); 1096 uint64_t start, len, offset; 1097 int ret; 1098 1099 qemu_co_mutex_unlock(&s->table_lock); 1100 1101 /* Populate front untouched region of new data cluster */ 1102 start = qed_start_of_cluster(s, acb->cur_pos); 1103 len = qed_offset_into_cluster(s, acb->cur_pos); 1104 1105 trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster); 1106 ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster); 1107 if (ret < 0) { 1108 goto out; 1109 } 1110 1111 /* Populate back untouched region of new data cluster */ 1112 start = acb->cur_pos + acb->cur_qiov.size; 1113 len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start; 1114 offset = acb->cur_cluster + 1115 qed_offset_into_cluster(s, acb->cur_pos) + 1116 acb->cur_qiov.size; 1117 1118 trace_qed_aio_write_postfill(s, acb, start, len, offset); 1119 ret = qed_copy_from_backing_file(s, start, len, offset); 1120 if (ret < 0) { 1121 goto out; 1122 } 1123 1124 ret = qed_aio_write_main(acb); 1125 if (ret < 0) { 1126 goto out; 1127 } 1128 1129 if (s->bs->backing) { 1130 /* 1131 * Flush new data clusters before updating the L2 table 1132 * 1133 * This flush is necessary when a backing file is in use. A crash 1134 * during an allocating write could result in empty clusters in the 1135 * image. If the write only touched a subregion of the cluster, 1136 * then backing image sectors have been lost in the untouched 1137 * region. The solution is to flush after writing a new data 1138 * cluster and before updating the L2 table. 1139 */ 1140 ret = bdrv_co_flush(s->bs->file->bs); 1141 } 1142 1143 out: 1144 qemu_co_mutex_lock(&s->table_lock); 1145 return ret; 1146 } 1147 1148 /** 1149 * Check if the QED_F_NEED_CHECK bit should be set during allocating write 1150 */ 1151 static bool qed_should_set_need_check(BDRVQEDState *s) 1152 { 1153 /* The flush before L2 update path ensures consistency */ 1154 if (s->bs->backing) { 1155 return false; 1156 } 1157 1158 return !(s->header.features & QED_F_NEED_CHECK); 1159 } 1160 1161 /** 1162 * Write new data cluster 1163 * 1164 * @acb: Write request 1165 * @len: Length in bytes 1166 * 1167 * This path is taken when writing to previously unallocated clusters. 1168 * 1169 * Called with table_lock held. 1170 */ 1171 static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len) 1172 { 1173 BDRVQEDState *s = acb_to_s(acb); 1174 int ret; 1175 1176 /* Cancel timer when the first allocating request comes in */ 1177 if (s->allocating_acb == NULL) { 1178 qed_cancel_need_check_timer(s); 1179 } 1180 1181 /* Freeze this request if another allocating write is in progress */ 1182 if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) { 1183 if (s->allocating_acb != NULL) { 1184 qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock); 1185 assert(s->allocating_acb == NULL); 1186 } 1187 s->allocating_acb = acb; 1188 return -EAGAIN; /* start over with looking up table entries */ 1189 } 1190 1191 acb->cur_nclusters = qed_bytes_to_clusters(s, 1192 qed_offset_into_cluster(s, acb->cur_pos) + len); 1193 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); 1194 1195 if (acb->flags & QED_AIOCB_ZERO) { 1196 /* Skip ahead if the clusters are already zero */ 1197 if (acb->find_cluster_ret == QED_CLUSTER_ZERO) { 1198 return 0; 1199 } 1200 acb->cur_cluster = 1; 1201 } else { 1202 acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters); 1203 } 1204 1205 if (qed_should_set_need_check(s)) { 1206 s->header.features |= QED_F_NEED_CHECK; 1207 ret = qed_write_header(s); 1208 if (ret < 0) { 1209 return ret; 1210 } 1211 } 1212 1213 if (!(acb->flags & QED_AIOCB_ZERO)) { 1214 ret = qed_aio_write_cow(acb); 1215 if (ret < 0) { 1216 return ret; 1217 } 1218 } 1219 1220 return qed_aio_write_l2_update(acb, acb->cur_cluster); 1221 } 1222 1223 /** 1224 * Write data cluster in place 1225 * 1226 * @acb: Write request 1227 * @offset: Cluster offset in bytes 1228 * @len: Length in bytes 1229 * 1230 * This path is taken when writing to already allocated clusters. 1231 * 1232 * Called with table_lock held. 1233 */ 1234 static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, 1235 size_t len) 1236 { 1237 BDRVQEDState *s = acb_to_s(acb); 1238 int r; 1239 1240 qemu_co_mutex_unlock(&s->table_lock); 1241 1242 /* Allocate buffer for zero writes */ 1243 if (acb->flags & QED_AIOCB_ZERO) { 1244 struct iovec *iov = acb->qiov->iov; 1245 1246 if (!iov->iov_base) { 1247 iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len); 1248 if (iov->iov_base == NULL) { 1249 r = -ENOMEM; 1250 goto out; 1251 } 1252 memset(iov->iov_base, 0, iov->iov_len); 1253 } 1254 } 1255 1256 /* Calculate the I/O vector */ 1257 acb->cur_cluster = offset; 1258 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); 1259 1260 /* Do the actual write. */ 1261 r = qed_aio_write_main(acb); 1262 out: 1263 qemu_co_mutex_lock(&s->table_lock); 1264 return r; 1265 } 1266 1267 /** 1268 * Write data cluster 1269 * 1270 * @opaque: Write request 1271 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1 1272 * @offset: Cluster offset in bytes 1273 * @len: Length in bytes 1274 * 1275 * Called with table_lock held. 1276 */ 1277 static int coroutine_fn qed_aio_write_data(void *opaque, int ret, 1278 uint64_t offset, size_t len) 1279 { 1280 QEDAIOCB *acb = opaque; 1281 1282 trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len); 1283 1284 acb->find_cluster_ret = ret; 1285 1286 switch (ret) { 1287 case QED_CLUSTER_FOUND: 1288 return qed_aio_write_inplace(acb, offset, len); 1289 1290 case QED_CLUSTER_L2: 1291 case QED_CLUSTER_L1: 1292 case QED_CLUSTER_ZERO: 1293 return qed_aio_write_alloc(acb, len); 1294 1295 default: 1296 g_assert_not_reached(); 1297 } 1298 } 1299 1300 /** 1301 * Read data cluster 1302 * 1303 * @opaque: Read request 1304 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1 1305 * @offset: Cluster offset in bytes 1306 * @len: Length in bytes 1307 * 1308 * Called with table_lock held. 1309 */ 1310 static int coroutine_fn qed_aio_read_data(void *opaque, int ret, 1311 uint64_t offset, size_t len) 1312 { 1313 QEDAIOCB *acb = opaque; 1314 BDRVQEDState *s = acb_to_s(acb); 1315 BlockDriverState *bs = acb->bs; 1316 int r; 1317 1318 qemu_co_mutex_unlock(&s->table_lock); 1319 1320 /* Adjust offset into cluster */ 1321 offset += qed_offset_into_cluster(s, acb->cur_pos); 1322 1323 trace_qed_aio_read_data(s, acb, ret, offset, len); 1324 1325 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); 1326 1327 /* Handle zero cluster and backing file reads, otherwise read 1328 * data cluster directly. 1329 */ 1330 if (ret == QED_CLUSTER_ZERO) { 1331 qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size); 1332 r = 0; 1333 } else if (ret != QED_CLUSTER_FOUND) { 1334 r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov, 1335 &acb->backing_qiov); 1336 } else { 1337 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 1338 r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size, 1339 &acb->cur_qiov, 0); 1340 } 1341 1342 qemu_co_mutex_lock(&s->table_lock); 1343 return r; 1344 } 1345 1346 /** 1347 * Begin next I/O or complete the request 1348 */ 1349 static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb) 1350 { 1351 BDRVQEDState *s = acb_to_s(acb); 1352 uint64_t offset; 1353 size_t len; 1354 int ret; 1355 1356 qemu_co_mutex_lock(&s->table_lock); 1357 while (1) { 1358 trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size); 1359 1360 if (acb->backing_qiov) { 1361 qemu_iovec_destroy(acb->backing_qiov); 1362 g_free(acb->backing_qiov); 1363 acb->backing_qiov = NULL; 1364 } 1365 1366 acb->qiov_offset += acb->cur_qiov.size; 1367 acb->cur_pos += acb->cur_qiov.size; 1368 qemu_iovec_reset(&acb->cur_qiov); 1369 1370 /* Complete request */ 1371 if (acb->cur_pos >= acb->end_pos) { 1372 ret = 0; 1373 break; 1374 } 1375 1376 /* Find next cluster and start I/O */ 1377 len = acb->end_pos - acb->cur_pos; 1378 ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset); 1379 if (ret < 0) { 1380 break; 1381 } 1382 1383 if (acb->flags & QED_AIOCB_WRITE) { 1384 ret = qed_aio_write_data(acb, ret, offset, len); 1385 } else { 1386 ret = qed_aio_read_data(acb, ret, offset, len); 1387 } 1388 1389 if (ret < 0 && ret != -EAGAIN) { 1390 break; 1391 } 1392 } 1393 1394 trace_qed_aio_complete(s, acb, ret); 1395 qed_aio_complete(acb); 1396 qemu_co_mutex_unlock(&s->table_lock); 1397 return ret; 1398 } 1399 1400 static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num, 1401 QEMUIOVector *qiov, int nb_sectors, 1402 int flags) 1403 { 1404 QEDAIOCB acb = { 1405 .bs = bs, 1406 .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE, 1407 .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE, 1408 .qiov = qiov, 1409 .flags = flags, 1410 }; 1411 qemu_iovec_init(&acb.cur_qiov, qiov->niov); 1412 1413 trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags); 1414 1415 /* Start request */ 1416 return qed_aio_next_io(&acb); 1417 } 1418 1419 static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs, 1420 int64_t sector_num, int nb_sectors, 1421 QEMUIOVector *qiov) 1422 { 1423 return qed_co_request(bs, sector_num, qiov, nb_sectors, 0); 1424 } 1425 1426 static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs, 1427 int64_t sector_num, int nb_sectors, 1428 QEMUIOVector *qiov, int flags) 1429 { 1430 assert(!flags); 1431 return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE); 1432 } 1433 1434 static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, 1435 int64_t offset, 1436 int bytes, 1437 BdrvRequestFlags flags) 1438 { 1439 BDRVQEDState *s = bs->opaque; 1440 1441 /* 1442 * Zero writes start without an I/O buffer. If a buffer becomes necessary 1443 * then it will be allocated during request processing. 1444 */ 1445 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes); 1446 1447 /* Fall back if the request is not aligned */ 1448 if (qed_offset_into_cluster(s, offset) || 1449 qed_offset_into_cluster(s, bytes)) { 1450 return -ENOTSUP; 1451 } 1452 1453 return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov, 1454 bytes >> BDRV_SECTOR_BITS, 1455 QED_AIOCB_WRITE | QED_AIOCB_ZERO); 1456 } 1457 1458 static int coroutine_fn bdrv_qed_co_truncate(BlockDriverState *bs, 1459 int64_t offset, 1460 PreallocMode prealloc, 1461 Error **errp) 1462 { 1463 BDRVQEDState *s = bs->opaque; 1464 uint64_t old_image_size; 1465 int ret; 1466 1467 if (prealloc != PREALLOC_MODE_OFF) { 1468 error_setg(errp, "Unsupported preallocation mode '%s'", 1469 PreallocMode_str(prealloc)); 1470 return -ENOTSUP; 1471 } 1472 1473 if (!qed_is_image_size_valid(offset, s->header.cluster_size, 1474 s->header.table_size)) { 1475 error_setg(errp, "Invalid image size specified"); 1476 return -EINVAL; 1477 } 1478 1479 if ((uint64_t)offset < s->header.image_size) { 1480 error_setg(errp, "Shrinking images is currently not supported"); 1481 return -ENOTSUP; 1482 } 1483 1484 old_image_size = s->header.image_size; 1485 s->header.image_size = offset; 1486 ret = qed_write_header_sync(s); 1487 if (ret < 0) { 1488 s->header.image_size = old_image_size; 1489 error_setg_errno(errp, -ret, "Failed to update the image size"); 1490 } 1491 return ret; 1492 } 1493 1494 static int64_t bdrv_qed_getlength(BlockDriverState *bs) 1495 { 1496 BDRVQEDState *s = bs->opaque; 1497 return s->header.image_size; 1498 } 1499 1500 static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 1501 { 1502 BDRVQEDState *s = bs->opaque; 1503 1504 memset(bdi, 0, sizeof(*bdi)); 1505 bdi->cluster_size = s->header.cluster_size; 1506 bdi->is_dirty = s->header.features & QED_F_NEED_CHECK; 1507 bdi->unallocated_blocks_are_zero = true; 1508 return 0; 1509 } 1510 1511 static int bdrv_qed_change_backing_file(BlockDriverState *bs, 1512 const char *backing_file, 1513 const char *backing_fmt) 1514 { 1515 BDRVQEDState *s = bs->opaque; 1516 QEDHeader new_header, le_header; 1517 void *buffer; 1518 size_t buffer_len, backing_file_len; 1519 int ret; 1520 1521 /* Refuse to set backing filename if unknown compat feature bits are 1522 * active. If the image uses an unknown compat feature then we may not 1523 * know the layout of data following the header structure and cannot safely 1524 * add a new string. 1525 */ 1526 if (backing_file && (s->header.compat_features & 1527 ~QED_COMPAT_FEATURE_MASK)) { 1528 return -ENOTSUP; 1529 } 1530 1531 memcpy(&new_header, &s->header, sizeof(new_header)); 1532 1533 new_header.features &= ~(QED_F_BACKING_FILE | 1534 QED_F_BACKING_FORMAT_NO_PROBE); 1535 1536 /* Adjust feature flags */ 1537 if (backing_file) { 1538 new_header.features |= QED_F_BACKING_FILE; 1539 1540 if (qed_fmt_is_raw(backing_fmt)) { 1541 new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE; 1542 } 1543 } 1544 1545 /* Calculate new header size */ 1546 backing_file_len = 0; 1547 1548 if (backing_file) { 1549 backing_file_len = strlen(backing_file); 1550 } 1551 1552 buffer_len = sizeof(new_header); 1553 new_header.backing_filename_offset = buffer_len; 1554 new_header.backing_filename_size = backing_file_len; 1555 buffer_len += backing_file_len; 1556 1557 /* Make sure we can rewrite header without failing */ 1558 if (buffer_len > new_header.header_size * new_header.cluster_size) { 1559 return -ENOSPC; 1560 } 1561 1562 /* Prepare new header */ 1563 buffer = g_malloc(buffer_len); 1564 1565 qed_header_cpu_to_le(&new_header, &le_header); 1566 memcpy(buffer, &le_header, sizeof(le_header)); 1567 buffer_len = sizeof(le_header); 1568 1569 if (backing_file) { 1570 memcpy(buffer + buffer_len, backing_file, backing_file_len); 1571 buffer_len += backing_file_len; 1572 } 1573 1574 /* Write new header */ 1575 ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len); 1576 g_free(buffer); 1577 if (ret == 0) { 1578 memcpy(&s->header, &new_header, sizeof(new_header)); 1579 } 1580 return ret; 1581 } 1582 1583 static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs, 1584 Error **errp) 1585 { 1586 BDRVQEDState *s = bs->opaque; 1587 Error *local_err = NULL; 1588 int ret; 1589 1590 bdrv_qed_close(bs); 1591 1592 bdrv_qed_init_state(bs); 1593 qemu_co_mutex_lock(&s->table_lock); 1594 ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err); 1595 qemu_co_mutex_unlock(&s->table_lock); 1596 if (local_err) { 1597 error_propagate_prepend(errp, local_err, 1598 "Could not reopen qed layer: "); 1599 return; 1600 } else if (ret < 0) { 1601 error_setg_errno(errp, -ret, "Could not reopen qed layer"); 1602 return; 1603 } 1604 } 1605 1606 static int bdrv_qed_co_check(BlockDriverState *bs, BdrvCheckResult *result, 1607 BdrvCheckMode fix) 1608 { 1609 BDRVQEDState *s = bs->opaque; 1610 int ret; 1611 1612 qemu_co_mutex_lock(&s->table_lock); 1613 ret = qed_check(s, result, !!fix); 1614 qemu_co_mutex_unlock(&s->table_lock); 1615 1616 return ret; 1617 } 1618 1619 static QemuOptsList qed_create_opts = { 1620 .name = "qed-create-opts", 1621 .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head), 1622 .desc = { 1623 { 1624 .name = BLOCK_OPT_SIZE, 1625 .type = QEMU_OPT_SIZE, 1626 .help = "Virtual disk size" 1627 }, 1628 { 1629 .name = BLOCK_OPT_BACKING_FILE, 1630 .type = QEMU_OPT_STRING, 1631 .help = "File name of a base image" 1632 }, 1633 { 1634 .name = BLOCK_OPT_BACKING_FMT, 1635 .type = QEMU_OPT_STRING, 1636 .help = "Image format of the base image" 1637 }, 1638 { 1639 .name = BLOCK_OPT_CLUSTER_SIZE, 1640 .type = QEMU_OPT_SIZE, 1641 .help = "Cluster size (in bytes)", 1642 .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE) 1643 }, 1644 { 1645 .name = BLOCK_OPT_TABLE_SIZE, 1646 .type = QEMU_OPT_SIZE, 1647 .help = "L1/L2 table size (in clusters)" 1648 }, 1649 { /* end of list */ } 1650 } 1651 }; 1652 1653 static BlockDriver bdrv_qed = { 1654 .format_name = "qed", 1655 .instance_size = sizeof(BDRVQEDState), 1656 .create_opts = &qed_create_opts, 1657 .supports_backing = true, 1658 1659 .bdrv_probe = bdrv_qed_probe, 1660 .bdrv_open = bdrv_qed_open, 1661 .bdrv_close = bdrv_qed_close, 1662 .bdrv_reopen_prepare = bdrv_qed_reopen_prepare, 1663 .bdrv_child_perm = bdrv_format_default_perms, 1664 .bdrv_co_create = bdrv_qed_co_create, 1665 .bdrv_co_create_opts = bdrv_qed_co_create_opts, 1666 .bdrv_has_zero_init = bdrv_has_zero_init_1, 1667 .bdrv_co_block_status = bdrv_qed_co_block_status, 1668 .bdrv_co_readv = bdrv_qed_co_readv, 1669 .bdrv_co_writev = bdrv_qed_co_writev, 1670 .bdrv_co_pwrite_zeroes = bdrv_qed_co_pwrite_zeroes, 1671 .bdrv_co_truncate = bdrv_qed_co_truncate, 1672 .bdrv_getlength = bdrv_qed_getlength, 1673 .bdrv_get_info = bdrv_qed_get_info, 1674 .bdrv_refresh_limits = bdrv_qed_refresh_limits, 1675 .bdrv_change_backing_file = bdrv_qed_change_backing_file, 1676 .bdrv_co_invalidate_cache = bdrv_qed_co_invalidate_cache, 1677 .bdrv_co_check = bdrv_qed_co_check, 1678 .bdrv_detach_aio_context = bdrv_qed_detach_aio_context, 1679 .bdrv_attach_aio_context = bdrv_qed_attach_aio_context, 1680 .bdrv_co_drain_begin = bdrv_qed_co_drain_begin, 1681 }; 1682 1683 static void bdrv_qed_init(void) 1684 { 1685 bdrv_register(&bdrv_qed); 1686 } 1687 1688 block_init(bdrv_qed_init); 1689