/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
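
/* Worked example (illustrative, not from the original source): assuming the
 * defaults of 64 KiB clusters and table_size 4, each table holds
 * (4 * 65536) / 8 = 32768 entries.  One L2 table then maps
 * 32768 * 65536 bytes = 2 GiB, and with 32768 L1 entries the maximum image
 * size is 2 GiB * 32768 = 64 TiB.
 */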

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}
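
/* Example (illustrative, mirrors the call in bdrv_qed_open() below): the
 * backing filename is stored in the image without a NUL terminator, so it is
 * read with
 *
 *   qed_read_string(bs->file, s->header.backing_filename_offset,
 *                   s->header.backing_filename_size, bs->backing_file,
 *                   sizeof(bs->backing_file));
 *
 * The n >= buflen check rejects strings that would leave no room for the
 * terminating NUL.
 */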

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}
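
/* Summary of the QED_F_NEED_CHECK lifecycle (derived from the code above and
 * the write path below): the flag is written to the header before the first
 * allocating write, marking the image dirty.  Once all allocating writes have
 * drained, a timer fires after QED_NEED_CHECK_TIMEOUT, plugs new allocating
 * writes, flushes data, clears the flag, and writes the header back.  A crash
 * while the flag is set triggers a consistency check on the next open.
 */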

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EMEDIUMTYPE;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
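
    /* Worked example (illustrative, not from the original source): with
     * 64 KiB clusters and table_size 4, table_nelems = (65536 * 4) / 8 =
     * 32768.  For powers of two, ffs() yields log2 + 1, so l2_shift = 16
     * (the bits addressing bytes within a cluster), l2_mask = 32767, and
     * l1_shift = 16 + 15 = 31.  Each L1 entry therefore covers 2^31 bytes =
     * 2 GiB of virtual address space.
     */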

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL, &local_err);
    if (ret < 0) {
        qerror_report_err(local_err);
        error_free(local_err);
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB,
                         &local_err);
    if (ret < 0) {
        qerror_report_err(local_err);
        error_free(local_err);
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }
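
    /* Resulting file layout (illustrative): cluster 0 holds the header
     * followed immediately by the backing filename (header_size is 1
     * cluster), and the L1 table starts at l1_table_offset = cluster_size,
     * occupying table_size clusters.  Data and L2 clusters are appended
     * after that as the image grows.
     */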

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options,
                           Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}
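
/* Example invocation (illustrative, not part of the original source):
 *
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 disk.qed 10G
 *
 * maps to bdrv_qed_create() with image_size = 10 GiB and the options above;
 * omitted options fall back to QED_DEFAULT_CLUSTER_SIZE and
 * QED_DEFAULT_TABLE_SIZE.
 */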

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
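
/* The three cases above, spelled out (illustrative): a read entirely past
 * backing EOF is zero-filled and completed immediately; a read that straddles
 * backing EOF is first zero-filled, then shortened so only the in-range bytes
 * are read from the backing file; a read entirely within the backing file is
 * issued at full length.
 */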

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
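
/* Example (illustrative): linking n = 3 clusters at byte offset X writes
 * entries X, X + cluster_size, X + 2 * cluster_size.  For the special
 * markers (unallocated or zero clusters) the same marker value is repeated
 * for all n entries instead of advancing by cluster_size.
 */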

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}
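
/* Metadata update ordering for an allocating write (derived from the
 * callbacks above and below): data clusters are written first, then the L2
 * table is updated, and only when a brand new L2 table was allocated does the
 * L1 table get rewritten to point at it.  A flush is inserted between the
 * data write and the L2 update when a backing file is present (see
 * qed_aio_write_flush_before_l2_update() below).
 */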

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}
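
/* Copy-on-write layout of a newly allocated cluster range (illustrative):
 *
 *   |<-- prefill -->|<----- guest data ----->|<-- postfill -->|
 *   ^               ^                        ^
 *   cluster start   cur_pos                  cur_pos + cur_qiov.size
 *
 * The prefill and postfill regions are copied from the backing file (or
 * zeroed) so the untouched parts of the cluster keep their old contents;
 * the chain runs prefill -> postfill -> qed_aio_write_main().
 */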

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}
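
/* Request lifecycle (summary of the state machine above): qed_aio_setup()
 * kicks off qed_aio_next_io(), which looks up the next run of contiguous
 * clusters via qed_find_cluster().  The result is dispatched to
 * qed_aio_read_data() or qed_aio_write_data(), whose I/O completion calls
 * qed_aio_next_io() again.  The loop ends when cur_pos reaches end_pos or an
 * error completes the request early.
 */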

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}
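
/* The done/co handshake above bridges the callback-based AIO path into a
 * coroutine: if qed_co_write_zeroes_cb() ran before control returned (the
 * synchronous case), done is already true and no yield happens; otherwise the
 * coroutine records itself in cb.co, yields, and is re-entered from the
 * callback.
 */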

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}
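
/* Growing the image is purely a header update: no clusters are allocated
 * here because QED allocates data and L2 clusters lazily on first write.
 * On a failed header write the in-memory size is rolled back so state stays
 * consistent with the on-disk header.
 */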

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
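
/* Buffer layout written above (illustrative):
 *
 *   byte 0                     sizeof(le_header)
 *   |-- little-endian header --|-- backing filename (no NUL) --|
 *
 * Both pieces go out in a single bdrv_pwrite_sync() request, so the header
 * fields and the string they point at are updated together.
 */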

static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, NULL, bs->open_flags, NULL);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);