Lines Matching +full:cluster +full:- +full:index
11 * See the COPYING.LIB file in the top-level directory.
20 #include "qemu/main-loop.h"
26 #include "sysemu/block-backend.h"
28 #include "qapi/qobject-input-visitor.h"
29 #include "qapi/qapi-visit-block-core.h"
41 if (le32_to_cpu(header->magic) != QED_MAGIC) { in bdrv_qed_probe()
59 cpu->magic = le32_to_cpu(le->magic); in qed_header_le_to_cpu()
60 cpu->cluster_size = le32_to_cpu(le->cluster_size); in qed_header_le_to_cpu()
61 cpu->table_size = le32_to_cpu(le->table_size); in qed_header_le_to_cpu()
62 cpu->header_size = le32_to_cpu(le->header_size); in qed_header_le_to_cpu()
63 cpu->features = le64_to_cpu(le->features); in qed_header_le_to_cpu()
64 cpu->compat_features = le64_to_cpu(le->compat_features); in qed_header_le_to_cpu()
65 cpu->autoclear_features = le64_to_cpu(le->autoclear_features); in qed_header_le_to_cpu()
66 cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset); in qed_header_le_to_cpu()
67 cpu->image_size = le64_to_cpu(le->image_size); in qed_header_le_to_cpu()
68 cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset); in qed_header_le_to_cpu()
69 cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size); in qed_header_le_to_cpu()
74 le->magic = cpu_to_le32(cpu->magic); in qed_header_cpu_to_le()
75 le->cluster_size = cpu_to_le32(cpu->cluster_size); in qed_header_cpu_to_le()
76 le->table_size = cpu_to_le32(cpu->table_size); in qed_header_cpu_to_le()
77 le->header_size = cpu_to_le32(cpu->header_size); in qed_header_cpu_to_le()
78 le->features = cpu_to_le64(cpu->features); in qed_header_cpu_to_le()
79 le->compat_features = cpu_to_le64(cpu->compat_features); in qed_header_cpu_to_le()
80 le->autoclear_features = cpu_to_le64(cpu->autoclear_features); in qed_header_cpu_to_le()
81 le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset); in qed_header_cpu_to_le()
82 le->image_size = cpu_to_le64(cpu->image_size); in qed_header_cpu_to_le()
83 le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset); in qed_header_cpu_to_le()
84 le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size); in qed_header_cpu_to_le()
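
The two converters above are symmetric, field-by-field byte swaps between the little-endian on-disk header and the host representation. A minimal standalone sketch of the same pattern, assuming a simplified two-field header (a hypothetical struct, not the real QEDHeader) and glibc's <endian.h> helpers:

    #include <endian.h>
    #include <stdint.h>

    typedef struct {
        uint32_t magic;        /* stored little-endian on disk */
        uint64_t image_size;
    } DiskHeader;

    /* On-disk (LE) -> host, mirroring qed_header_le_to_cpu() */
    static void header_le_to_cpu(const DiskHeader *le, DiskHeader *cpu)
    {
        cpu->magic      = le32toh(le->magic);
        cpu->image_size = le64toh(le->image_size);
    }

    /* Host -> on-disk (LE), mirroring qed_header_cpu_to_le() */
    static void header_cpu_to_le(const DiskHeader *cpu, DiskHeader *le)
    {
        le->magic      = htole32(cpu->magic);
        le->image_size = htole64(cpu->image_size);
    }
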
91 qed_header_cpu_to_le(&s->header, &le); in qed_write_header_sync()
92 return bdrv_pwrite(s->bs->file, 0, sizeof(le), &le, 0); in qed_write_header_sync()
96 * Update header in-place (does not rewrite backing filename or other strings)
98 * This function only updates known header fields in-place and does not affect
116 assert(s->allocating_acb || s->allocating_write_reqs_plugged); in qed_write_header()
118 buf = qemu_blockalign(s->bs, len); in qed_write_header()
120 ret = bdrv_co_pread(s->bs->file, 0, len, buf, 0); in qed_write_header()
126 qed_header_cpu_to_le(&s->header, (QEDHeader *) buf); in qed_write_header()
128 ret = bdrv_co_pwrite(s->bs->file, 0, len, buf, 0); in qed_write_header()
156 if (cluster_size & (cluster_size - 1)) { in qed_is_cluster_size_valid()
168 if (table_size & (table_size - 1)) { in qed_is_table_size_valid()
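
Both validators rely on the same bit trick: a nonzero value is a power of two exactly when clearing its lowest set bit (x & (x - 1)) leaves zero. A self-contained sketch with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_power_of_2(uint32_t x)
    {
        /* x - 1 flips the lowest set bit and every bit below it, so the
         * AND is zero only when x had a single bit set. */
        return x != 0 && (x & (x - 1)) == 0;
    }

qed_is_cluster_size_valid() and qed_is_table_size_valid() additionally range-check the value, which the sketch omits.
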
194 * @ret: 0 on success, -errno on failure
196 * The string is NUL-terminated.
204 return -EINVAL; in qed_read_string()
219 * @ret: Offset of first allocated cluster
229 uint64_t offset = s->file_size; in qed_alloc_clusters()
230 s->file_size += n * s->header.cluster_size; in qed_alloc_clusters()
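
The allocator above is append-only: QED never frees or reuses clusters at runtime, so allocating n clusters just records the current end of file and advances it. A standalone sketch with a hypothetical signature (the real function takes a BDRVQEDState):

    #include <stdint.h>

    static uint64_t alloc_clusters(uint64_t *file_size, uint32_t cluster_size,
                                   unsigned int n)
    {
        uint64_t offset = *file_size;     /* offset of first new cluster */
        *file_size += (uint64_t)n * cluster_size;
        return offset;
    }
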
237 return qemu_blockalign(s->bs, in qed_alloc_table()
238 s->header.cluster_size * s->header.table_size); in qed_alloc_table()
248 CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache); in qed_new_l2_table()
250 l2_table->table = qed_alloc_table(s); in qed_new_l2_table()
251 l2_table->offset = qed_alloc_clusters(s, s->header.table_size); in qed_new_l2_table()
253 memset(l2_table->table->offsets, 0, in qed_new_l2_table()
254 s->header.cluster_size * s->header.table_size); in qed_new_l2_table()
260 qemu_co_mutex_lock(&s->table_lock); in qed_plug_allocating_write_reqs()
263 assert(!s->allocating_write_reqs_plugged); in qed_plug_allocating_write_reqs()
264 if (s->allocating_acb != NULL) { in qed_plug_allocating_write_reqs()
268 qemu_co_mutex_unlock(&s->table_lock); in qed_plug_allocating_write_reqs()
272 s->allocating_write_reqs_plugged = true; in qed_plug_allocating_write_reqs()
273 qemu_co_mutex_unlock(&s->table_lock); in qed_plug_allocating_write_reqs()
279 qemu_co_mutex_lock(&s->table_lock); in qed_unplug_allocating_write_reqs()
280 assert(s->allocating_write_reqs_plugged); in qed_unplug_allocating_write_reqs()
281 s->allocating_write_reqs_plugged = false; in qed_unplug_allocating_write_reqs()
282 qemu_co_queue_next(&s->allocating_write_reqs); in qed_unplug_allocating_write_reqs()
283 qemu_co_mutex_unlock(&s->table_lock); in qed_unplug_allocating_write_reqs()
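
The plug/unplug pair gates allocating write requests around metadata updates: while plugged, newly arriving allocating writes queue instead of starting, and unplug wakes the next waiter. QED builds this from a CoMutex and a CoQueue; as a rough thread-based analogy (explicitly not QEMU's coroutine code), the same gate could look like:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
        pthread_cond_t  cond;   /* init with PTHREAD_COND_INITIALIZER */
        bool plugged;
    } AllocGate;

    /* Wait until the gate is free, then close it behind us. */
    static void gate_plug(AllocGate *g)
    {
        pthread_mutex_lock(&g->lock);
        while (g->plugged) {
            pthread_cond_wait(&g->cond, &g->lock);
        }
        g->plugged = true;
        pthread_mutex_unlock(&g->lock);
    }

    /* Reopen the gate and wake one waiter, like qemu_co_queue_next(). */
    static void gate_unplug(AllocGate *g)
    {
        pthread_mutex_lock(&g->lock);
        g->plugged = false;
        pthread_cond_signal(&g->cond);
        pthread_mutex_unlock(&g->lock);
    }
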
298 ret = bdrv_co_flush(s->bs->file->bs); in qed_need_check_timer()
304 s->header.features &= ~QED_F_NEED_CHECK; in qed_need_check_timer()
310 ret = bdrv_co_flush(s->bs); in qed_need_check_timer()
320 bdrv_dec_in_flight(s->bs); in qed_need_check_timer_entry()
328 bdrv_inc_in_flight(s->bs); in qed_need_check_timer_cb()
339 timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + in qed_start_need_check_timer()
347 timer_del(s->need_check_timer); in qed_cancel_need_check_timer()
352 BDRVQEDState *s = bs->opaque; in bdrv_qed_detach_aio_context()
355 timer_free(s->need_check_timer); in bdrv_qed_detach_aio_context()
361 BDRVQEDState *s = bs->opaque; in bdrv_qed_attach_aio_context()
363 s->need_check_timer = aio_timer_new(new_context, in bdrv_qed_attach_aio_context()
366 if (s->header.features & QED_F_NEED_CHECK) { in bdrv_qed_attach_aio_context()
373 BDRVQEDState *s = bs->opaque; in bdrv_qed_drain_begin()
378 if (s->need_check_timer && timer_pending(s->need_check_timer)) { in bdrv_qed_drain_begin()
390 BDRVQEDState *s = bs->opaque; in bdrv_qed_init_state()
393 s->bs = bs; in bdrv_qed_init_state()
394 qemu_co_mutex_init(&s->table_lock); in bdrv_qed_init_state()
395 qemu_co_queue_init(&s->allocating_write_reqs); in bdrv_qed_init_state()
402 BDRVQEDState *s = bs->opaque; in bdrv_qed_do_open()
407 ret = bdrv_co_pread(bs->file, 0, sizeof(le_header), &le_header, 0); in bdrv_qed_do_open()
412 qed_header_le_to_cpu(&le_header, &s->header); in bdrv_qed_do_open()
414 if (s->header.magic != QED_MAGIC) { in bdrv_qed_do_open()
416 return -EINVAL; in bdrv_qed_do_open()
418 if (s->header.features & ~QED_FEATURE_MASK) { in bdrv_qed_do_open()
421 s->header.features & ~QED_FEATURE_MASK); in bdrv_qed_do_open()
422 return -ENOTSUP; in bdrv_qed_do_open()
424 if (!qed_is_cluster_size_valid(s->header.cluster_size)) { in bdrv_qed_do_open()
425 error_setg(errp, "QED cluster size is invalid"); in bdrv_qed_do_open()
426 return -EINVAL; in bdrv_qed_do_open()
429 /* Round down file size to the last cluster */ in bdrv_qed_do_open()
430 file_size = bdrv_co_getlength(bs->file->bs); in bdrv_qed_do_open()
435 s->file_size = qed_start_of_cluster(s, file_size); in bdrv_qed_do_open()
437 if (!qed_is_table_size_valid(s->header.table_size)) { in bdrv_qed_do_open()
439 return -EINVAL; in bdrv_qed_do_open()
441 if (!qed_is_image_size_valid(s->header.image_size, in bdrv_qed_do_open()
442 s->header.cluster_size, in bdrv_qed_do_open()
443 s->header.table_size)) { in bdrv_qed_do_open()
445 return -EINVAL; in bdrv_qed_do_open()
447 if (!qed_check_table_offset(s, s->header.l1_table_offset)) { in bdrv_qed_do_open()
449 return -EINVAL; in bdrv_qed_do_open()
452 s->table_nelems = (s->header.cluster_size * s->header.table_size) / in bdrv_qed_do_open()
454 s->l2_shift = ctz32(s->header.cluster_size); in bdrv_qed_do_open()
455 s->l2_mask = s->table_nelems - 1; in bdrv_qed_do_open()
456 s->l1_shift = s->l2_shift + ctz32(s->table_nelems); in bdrv_qed_do_open()
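
With cluster_size and table_size both powers of two, table_nelems is one as well, so a guest offset can be split into L1 index, L2 index, and in-cluster offset with shifts and masks alone, no division. A runnable sketch assuming the default geometry (64 KiB clusters, 4-cluster tables; hypothetical standalone code using the GCC/Clang __builtin_ctz in place of QEMU's ctz32):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t cluster_size = 65536;                  /* 64 KiB */
        uint32_t table_size   = 4;                      /* clusters per table */
        uint32_t table_nelems = cluster_size * table_size / sizeof(uint64_t);
        uint32_t l2_shift = __builtin_ctz(cluster_size);            /* 16 */
        uint32_t l2_mask  = table_nelems - 1;
        uint32_t l1_shift = l2_shift + __builtin_ctz(table_nelems); /* 31 */

        uint64_t pos = (5ULL << 30) + 12345;            /* example guest offset */
        printf("l1 %llu, l2 %llu, into-cluster %llu\n",
               (unsigned long long)(pos >> l1_shift),
               (unsigned long long)((pos >> l2_shift) & l2_mask),
               (unsigned long long)(pos & (cluster_size - 1)));
        return 0;
    }
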
459 if (s->header.header_size > UINT32_MAX / s->header.cluster_size) { in bdrv_qed_do_open()
461 return -EINVAL; in bdrv_qed_do_open()
464 if ((s->header.features & QED_F_BACKING_FILE)) { in bdrv_qed_do_open()
467 if ((uint64_t)s->header.backing_filename_offset + in bdrv_qed_do_open()
468 s->header.backing_filename_size > in bdrv_qed_do_open()
469 s->header.cluster_size * s->header.header_size) { in bdrv_qed_do_open()
471 return -EINVAL; in bdrv_qed_do_open()
474 backing_file_str = g_malloc(sizeof(bs->backing_file)); in bdrv_qed_do_open()
475 ret = qed_read_string(bs->file, s->header.backing_filename_offset, in bdrv_qed_do_open()
476 s->header.backing_filename_size, in bdrv_qed_do_open()
477 backing_file_str, sizeof(bs->backing_file)); in bdrv_qed_do_open()
483 if (!g_str_equal(backing_file_str, bs->backing_file)) { in bdrv_qed_do_open()
484 pstrcpy(bs->backing_file, sizeof(bs->backing_file), in bdrv_qed_do_open()
486 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), in bdrv_qed_do_open()
490 if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) { in bdrv_qed_do_open()
491 pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw"); in bdrv_qed_do_open()
501 if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 && in bdrv_qed_do_open()
502 !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) { in bdrv_qed_do_open()
503 s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK; in bdrv_qed_do_open()
512 bdrv_co_flush(bs->file->bs); in bdrv_qed_do_open()
515 s->l1_table = qed_alloc_table(s); in bdrv_qed_do_open()
516 qed_init_l2_cache(&s->l2_cache); in bdrv_qed_do_open()
525 if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) { in bdrv_qed_do_open()
526 /* Read-only images cannot be fixed. There is no risk of corruption in bdrv_qed_do_open()
528 * potentially inconsistent images to be opened read-only. This can in bdrv_qed_do_open()
531 if (!bdrv_is_read_only(bs->file->bs) && in bdrv_qed_do_open()
547 qed_free_l2_cache(&s->l2_cache); in bdrv_qed_do_open()
548 qemu_vfree(s->l1_table); in bdrv_qed_do_open()
564 BDRVQEDState *s = qoc->bs->opaque; in bdrv_qed_open_entry()
568 qemu_co_mutex_lock(&s->table_lock); in bdrv_qed_open_entry()
569 qoc->ret = bdrv_qed_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); in bdrv_qed_open_entry()
570 qemu_co_mutex_unlock(&s->table_lock); in bdrv_qed_open_entry()
581 .ret = -EINPROGRESS in bdrv_qed_open()
594 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); in bdrv_qed_open()
601 BDRVQEDState *s = bs->opaque; in bdrv_qed_refresh_limits()
603 bs->bl.pwrite_zeroes_alignment = s->header.cluster_size; in bdrv_qed_refresh_limits()
604 bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size); in bdrv_qed_refresh_limits()
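
max_pwrite_zeroes is rounded down so that zero-write requests stay cluster-aligned while fitting in an int; QEMU_ALIGN_DOWN(x, a) is just (x / a) * a. Equivalent standalone computation (hypothetical helper):

    #include <limits.h>
    #include <stdint.h>

    /* Largest multiple of cluster_size that still fits in an int. */
    static int64_t max_zero_write(uint32_t cluster_size)
    {
        return ((int64_t)INT_MAX / cluster_size) * cluster_size;
    }
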
617 BDRVQEDState *s = bs->opaque; in bdrv_qed_do_close()
622 bdrv_flush(bs->file->bs); in bdrv_qed_do_close()
625 if (s->header.features & QED_F_NEED_CHECK) { in bdrv_qed_do_close()
626 s->header.features &= ~QED_F_NEED_CHECK; in bdrv_qed_do_close()
630 qed_free_l2_cache(&s->l2_cache); in bdrv_qed_do_close()
631 qemu_vfree(s->l1_table); in bdrv_qed_do_close()
655 assert(opts->driver == BLOCKDEV_DRIVER_QED); in bdrv_qed_co_create()
656 qed_opts = &opts->u.qed; in bdrv_qed_co_create()
659 if (!qed_opts->has_cluster_size) { in bdrv_qed_co_create()
660 qed_opts->cluster_size = QED_DEFAULT_CLUSTER_SIZE; in bdrv_qed_co_create()
662 if (!qed_opts->has_table_size) { in bdrv_qed_co_create()
663 qed_opts->table_size = QED_DEFAULT_TABLE_SIZE; in bdrv_qed_co_create()
666 if (!qed_is_cluster_size_valid(qed_opts->cluster_size)) { in bdrv_qed_co_create()
667 error_setg(errp, "QED cluster size must be within range [%u, %u] " in bdrv_qed_co_create()
670 return -EINVAL; in bdrv_qed_co_create()
672 if (!qed_is_table_size_valid(qed_opts->table_size)) { in bdrv_qed_co_create()
676 return -EINVAL; in bdrv_qed_co_create()
678 if (!qed_is_image_size_valid(qed_opts->size, qed_opts->cluster_size, in bdrv_qed_co_create()
679 qed_opts->table_size)) in bdrv_qed_co_create()
681 error_setg(errp, "QED image size must be a non-zero multiple of " in bdrv_qed_co_create()
682 "cluster size and less than %" PRIu64 " bytes", in bdrv_qed_co_create()
683 qed_max_image_size(qed_opts->cluster_size, in bdrv_qed_co_create()
684 qed_opts->table_size)); in bdrv_qed_co_create()
685 return -EINVAL; in bdrv_qed_co_create()
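
The ceiling enforced here follows from the two-level table: one L1 table of table_nelems entries, each naming an L2 table of table_nelems cluster pointers. A sketch of the bound (hypothetical helper; the real qed_max_image_size() also accounts for the clusters consumed by the header and the tables themselves, which this ignores). With the defaults of 64 KiB clusters and 4-cluster tables it comes to 32768^2 * 64 KiB = 64 TiB:

    #include <stdint.h>

    static uint64_t max_image_size(uint32_t cluster_size, uint32_t table_size)
    {
        uint64_t nelems = (uint64_t)cluster_size * table_size / sizeof(uint64_t);
        return nelems * nelems * cluster_size; /* L1 entries x L2 entries x bytes */
    }
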
689 bs = bdrv_co_open_blockdev_ref(qed_opts->file, errp); in bdrv_qed_co_create()
691 return -EIO; in bdrv_qed_co_create()
697 ret = -EPERM; in bdrv_qed_co_create()
705 .cluster_size = qed_opts->cluster_size, in bdrv_qed_co_create()
706 .table_size = qed_opts->table_size, in bdrv_qed_co_create()
710 .l1_table_offset = qed_opts->cluster_size, in bdrv_qed_co_create()
711 .image_size = qed_opts->size, in bdrv_qed_co_create()
725 if (qed_opts->backing_file) { in bdrv_qed_co_create()
728 header.backing_filename_size = strlen(qed_opts->backing_file); in bdrv_qed_co_create()
730 if (qed_opts->has_backing_fmt) { in bdrv_qed_co_create()
731 const char *backing_fmt = BlockdevDriver_str(qed_opts->backing_fmt); in bdrv_qed_co_create()
744 qed_opts->backing_file, 0); in bdrv_qed_co_create()
774 { BLOCK_OPT_BACKING_FILE, "backing-file" }, in bdrv_qed_co_create_opts()
775 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, in bdrv_qed_co_create_opts()
776 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, in bdrv_qed_co_create_opts()
777 { BLOCK_OPT_TABLE_SIZE, "table-size" }, in bdrv_qed_co_create_opts()
785 ret = -EINVAL; in bdrv_qed_co_create_opts()
798 ret = -EIO; in bdrv_qed_co_create_opts()
804 qdict_put_str(qdict, "file", bs->node_name); in bdrv_qed_co_create_opts()
808 ret = -EINVAL; in bdrv_qed_co_create_opts()
815 ret = -EINVAL; in bdrv_qed_co_create_opts()
820 assert(create_options->driver == BLOCKDEV_DRIVER_QED); in bdrv_qed_co_create_opts()
821 create_options->u.qed.size = in bdrv_qed_co_create_opts()
822 ROUND_UP(create_options->u.qed.size, BDRV_SECTOR_SIZE); in bdrv_qed_co_create_opts()
839 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_block_status()
846 qemu_co_mutex_lock(&s->table_lock); in bdrv_qed_co_block_status()
854 *file = bs->file->bs; in bdrv_qed_co_block_status()
870 qemu_co_mutex_unlock(&s->table_lock); in bdrv_qed_co_block_status()
877 return acb->bs->opaque; in acb_to_s()
881 * Read from the backing file or zero-fill if no backing file
887 * This function reads qiov->size bytes starting at pos from the backing file.
893 if (s->bs->backing) { in qed_read_backing_file()
894 BLKDBG_CO_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO); in qed_read_backing_file()
895 return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0); in qed_read_backing_file()
897 qemu_iovec_memset(qiov, 0, 0, qiov->size); in qed_read_backing_file()
921 qemu_iovec_init_buf(&qiov, qemu_blockalign(s->bs, len), len); in qed_copy_from_backing_file()
929 BLKDBG_CO_EVENT(s->bs->file, BLKDBG_COW_WRITE); in qed_copy_from_backing_file()
930 ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0); in qed_copy_from_backing_file()
945 * @index: First cluster index
947 * @cluster: First cluster offset
949 * The cluster offset may be an allocated byte offset in the image file, the
950 * zero cluster marker, or the unallocated cluster marker.
955 int index, unsigned int n, in qed_update_l2_table() argument
956 uint64_t cluster) in qed_update_l2_table() argument
959 for (i = index; i < index + n; i++) { in qed_update_l2_table()
960 table->offsets[i] = cluster; in qed_update_l2_table()
961 if (!qed_offset_is_unalloc_cluster(cluster) && in qed_update_l2_table()
962 !qed_offset_is_zero_cluster(cluster)) { in qed_update_l2_table()
963 cluster += s->header.cluster_size; in qed_update_l2_table()
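
The loop writes n consecutive L2 entries: real data clusters are contiguous in the image file, so each successive entry advances by cluster_size, while the special markers are replicated unchanged. A standalone version, assuming QED's convention that offset 0 means unallocated and 1 means a zero cluster (hypothetical function name):

    #include <stdint.h>

    static void update_l2_entries(uint64_t *offsets, uint32_t cluster_size,
                                  int index, unsigned int n, uint64_t cluster)
    {
        for (int i = index; i < index + (int)n; i++) {
            offsets[i] = cluster;
            if (cluster > 1) {              /* only real offsets advance */
                cluster += cluster_size;
            }
        }
    }
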
974 qemu_iovec_destroy(&acb->cur_qiov); in qed_aio_complete()
975 qed_unref_l2_cache_entry(acb->request.l2_table); in qed_aio_complete()
978 if (acb->flags & QED_AIOCB_ZERO) { in qed_aio_complete()
979 qemu_vfree(acb->qiov->iov[0].iov_base); in qed_aio_complete()
980 acb->qiov->iov[0].iov_base = NULL; in qed_aio_complete()
984 * requests enqueue themselves when they first hit an unallocated cluster in qed_aio_complete()
989 if (acb == s->allocating_acb) { in qed_aio_complete()
990 s->allocating_acb = NULL; in qed_aio_complete()
991 if (!qemu_co_queue_empty(&s->allocating_write_reqs)) { in qed_aio_complete()
992 qemu_co_queue_next(&s->allocating_write_reqs); in qed_aio_complete()
993 } else if (s->header.features & QED_F_NEED_CHECK) { in qed_aio_complete()
1007 CachedL2Table *l2_table = acb->request.l2_table; in qed_aio_write_l1_update()
1008 uint64_t l2_offset = l2_table->offset; in qed_aio_write_l1_update()
1009 int index, ret; in qed_aio_write_l1_update() local
1011 index = qed_l1_index(s, acb->cur_pos); in qed_aio_write_l1_update()
1012 s->l1_table->offsets[index] = l2_table->offset; in qed_aio_write_l1_update()
1014 ret = qed_write_l1_table(s, index, 1); in qed_aio_write_l1_update()
1017 qed_commit_l2_cache_entry(&s->l2_cache, l2_table); in qed_aio_write_l1_update()
1022 acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset); in qed_aio_write_l1_update()
1023 assert(acb->request.l2_table != NULL); in qed_aio_write_l1_update()
1030 * Update L2 table with new cluster offsets and write them out
1038 bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1; in qed_aio_write_l2_update()
1039 int index, ret; in qed_aio_write_l2_update() local
1042 qed_unref_l2_cache_entry(acb->request.l2_table); in qed_aio_write_l2_update()
1043 acb->request.l2_table = qed_new_l2_table(s); in qed_aio_write_l2_update()
1046 index = qed_l2_index(s, acb->cur_pos); in qed_aio_write_l2_update()
1047 qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters, in qed_aio_write_l2_update()
1052 ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true); in qed_aio_write_l2_update()
1059 ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, in qed_aio_write_l2_update()
1076 uint64_t offset = acb->cur_cluster + in qed_aio_write_main()
1077 qed_offset_into_cluster(s, acb->cur_pos); in qed_aio_write_main()
1079 trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size); in qed_aio_write_main()
1081 BLKDBG_CO_EVENT(s->bs->file, BLKDBG_WRITE_AIO); in qed_aio_write_main()
1082 return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size, in qed_aio_write_main()
1083 &acb->cur_qiov, 0); in qed_aio_write_main()
1087 * Populate untouched regions of new data cluster
1097 qemu_co_mutex_unlock(&s->table_lock); in qed_aio_write_cow()
1099 /* Populate front untouched region of new data cluster */ in qed_aio_write_cow()
1100 start = qed_start_of_cluster(s, acb->cur_pos); in qed_aio_write_cow()
1101 len = qed_offset_into_cluster(s, acb->cur_pos); in qed_aio_write_cow()
1103 trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster); in qed_aio_write_cow()
1104 ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster); in qed_aio_write_cow()
1109 /* Populate back untouched region of new data cluster */ in qed_aio_write_cow()
1110 start = acb->cur_pos + acb->cur_qiov.size; in qed_aio_write_cow()
1111 len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start; in qed_aio_write_cow()
1112 offset = acb->cur_cluster + in qed_aio_write_cow()
1113 qed_offset_into_cluster(s, acb->cur_pos) + in qed_aio_write_cow()
1114 acb->cur_qiov.size; in qed_aio_write_cow()
1127 if (s->bs->backing) { in qed_aio_write_cow()
1133 * image. If the write only touched a subregion of the cluster, in qed_aio_write_cow()
1136 * cluster and before updating the L2 table. in qed_aio_write_cow()
1138 ret = bdrv_co_flush(s->bs->file->bs); in qed_aio_write_cow()
1142 qemu_co_mutex_lock(&s->table_lock); in qed_aio_write_cow()
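
The prefill and postfill lengths come from rounding the request start down, and its end up, to cluster boundaries; with a power-of-two cluster size each helper is a single mask. Standalone sketches with hypothetical names (QED's versions take a BDRVQEDState):

    #include <stdint.h>

    static uint64_t start_of_cluster(uint64_t cluster_size, uint64_t offset)
    {
        return offset & ~(cluster_size - 1);     /* round down */
    }

    static uint64_t offset_into_cluster(uint64_t cluster_size, uint64_t offset)
    {
        return offset & (cluster_size - 1);
    }

    /* Back-padding length as computed above: from the end of the request
     * to the end of its last cluster. */
    static uint64_t back_pad_len(uint64_t cluster_size, uint64_t end)
    {
        return start_of_cluster(cluster_size, end + cluster_size - 1) - end;
    }
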
1152 if (s->bs->backing) { in qed_should_set_need_check()
1156 return !(s->header.features & QED_F_NEED_CHECK); in qed_should_set_need_check()
1160 * Write new data cluster
1176 if (s->allocating_acb == NULL) { in qed_aio_write_alloc()
1181 if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) { in qed_aio_write_alloc()
1182 if (s->allocating_acb != NULL) { in qed_aio_write_alloc()
1183 qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock); in qed_aio_write_alloc()
1184 assert(s->allocating_acb == NULL); in qed_aio_write_alloc()
1186 s->allocating_acb = acb; in qed_aio_write_alloc()
1187 return -EAGAIN; /* start over with looking up table entries */ in qed_aio_write_alloc()
1190 acb->cur_nclusters = qed_bytes_to_clusters(s, in qed_aio_write_alloc()
1191 qed_offset_into_cluster(s, acb->cur_pos) + len); in qed_aio_write_alloc()
1192 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); in qed_aio_write_alloc()
1194 if (acb->flags & QED_AIOCB_ZERO) { in qed_aio_write_alloc()
1196 if (acb->find_cluster_ret == QED_CLUSTER_ZERO) { in qed_aio_write_alloc()
1199 acb->cur_cluster = 1; in qed_aio_write_alloc()
1201 acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters); in qed_aio_write_alloc()
1205 s->header.features |= QED_F_NEED_CHECK; in qed_aio_write_alloc()
1212 if (!(acb->flags & QED_AIOCB_ZERO)) { in qed_aio_write_alloc()
1219 return qed_aio_write_l2_update(acb, acb->cur_cluster); in qed_aio_write_alloc()
1223 * Write data cluster in place
1226 * @offset: Cluster offset in bytes
1239 qemu_co_mutex_unlock(&s->table_lock); in qed_aio_write_inplace()
1242 if (acb->flags & QED_AIOCB_ZERO) { in qed_aio_write_inplace()
1243 struct iovec *iov = acb->qiov->iov; in qed_aio_write_inplace()
1245 if (!iov->iov_base) { in qed_aio_write_inplace()
1246 iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len); in qed_aio_write_inplace()
1247 if (iov->iov_base == NULL) { in qed_aio_write_inplace()
1248 r = -ENOMEM; in qed_aio_write_inplace()
1251 memset(iov->iov_base, 0, iov->iov_len); in qed_aio_write_inplace()
1256 acb->cur_cluster = offset; in qed_aio_write_inplace()
1257 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); in qed_aio_write_inplace()
1262 qemu_co_mutex_lock(&s->table_lock); in qed_aio_write_inplace()
1267 * Write data cluster
1271 * @offset: Cluster offset in bytes
1283 acb->find_cluster_ret = ret; in qed_aio_write_data()
1300 * Read data cluster
1304 * @offset: Cluster offset in bytes
1314 BlockDriverState *bs = acb->bs; in qed_aio_read_data()
1317 qemu_co_mutex_unlock(&s->table_lock); in qed_aio_read_data()
1319 /* Adjust offset into cluster */ in qed_aio_read_data()
1320 offset += qed_offset_into_cluster(s, acb->cur_pos); in qed_aio_read_data()
1324 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); in qed_aio_read_data()
1326 /* Handle zero cluster and backing file reads, otherwise read in qed_aio_read_data()
1327 * data cluster directly. in qed_aio_read_data()
1330 qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size); in qed_aio_read_data()
1333 r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov); in qed_aio_read_data()
1335 BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); in qed_aio_read_data()
1336 r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size, in qed_aio_read_data()
1337 &acb->cur_qiov, 0); in qed_aio_read_data()
1340 qemu_co_mutex_lock(&s->table_lock); in qed_aio_read_data()
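
The read path is a three-way dispatch on the cluster lookup result: zero clusters are satisfied by memset with no I/O at all, unallocated clusters fall back to the backing file (or a zero fill, per qed_read_backing_file above), and allocated clusters are read directly from the image file. A sketch with hypothetical status values standing in for QED_CLUSTER_ZERO / QED_CLUSTER_FOUND and stubbed-out I/O:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef enum { CLUSTER_FOUND, CLUSTER_ZERO, CLUSTER_MISS } ClusterStatus;

    /* Stubs standing in for real image / backing-file reads. */
    static int read_image(uint8_t *buf, size_t len)   { (void)buf; (void)len; return 0; }
    static int read_backing(uint8_t *buf, size_t len) { memset(buf, 0, len); return 0; }

    static int read_cluster(ClusterStatus st, uint8_t *buf, size_t len)
    {
        switch (st) {
        case CLUSTER_ZERO:
            memset(buf, 0, len);           /* zero marker: no I/O */
            return 0;
        case CLUSTER_MISS:
            return read_backing(buf, len); /* backing file or zeros */
        default:
            return read_image(buf, len);   /* allocated data cluster */
        }
    }
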
1354 qemu_co_mutex_lock(&s->table_lock); in qed_aio_next_io()
1356 trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size); in qed_aio_next_io()
1358 acb->qiov_offset += acb->cur_qiov.size; in qed_aio_next_io()
1359 acb->cur_pos += acb->cur_qiov.size; in qed_aio_next_io()
1360 qemu_iovec_reset(&acb->cur_qiov); in qed_aio_next_io()
1363 if (acb->cur_pos >= acb->end_pos) { in qed_aio_next_io()
1368 /* Find next cluster and start I/O */ in qed_aio_next_io()
1369 len = acb->end_pos - acb->cur_pos; in qed_aio_next_io()
1370 ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset); in qed_aio_next_io()
1375 if (acb->flags & QED_AIOCB_WRITE) { in qed_aio_next_io()
1381 if (ret < 0 && ret != -EAGAIN) { in qed_aio_next_io()
1388 qemu_co_mutex_unlock(&s->table_lock); in qed_aio_next_io()
1403 qemu_iovec_init(&acb.cur_qiov, qiov->niov); in qed_co_request()
1405 trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags); in qed_co_request()
1429 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_pwrite_zeroes()
1438 * QED is not prepared for 63bit write-zero requests, so rely on in bdrv_qed_co_pwrite_zeroes()
1446 return -ENOTSUP; in bdrv_qed_co_pwrite_zeroes()
1459 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_truncate()
1466 return -ENOTSUP; in bdrv_qed_co_truncate()
1469 if (!qed_is_image_size_valid(offset, s->header.cluster_size, in bdrv_qed_co_truncate()
1470 s->header.table_size)) { in bdrv_qed_co_truncate()
1472 return -EINVAL; in bdrv_qed_co_truncate()
1475 if ((uint64_t)offset < s->header.image_size) { in bdrv_qed_co_truncate()
1477 return -ENOTSUP; in bdrv_qed_co_truncate()
1480 old_image_size = s->header.image_size; in bdrv_qed_co_truncate()
1481 s->header.image_size = offset; in bdrv_qed_co_truncate()
1484 s->header.image_size = old_image_size; in bdrv_qed_co_truncate()
1485 error_setg_errno(errp, -ret, "Failed to update the image size"); in bdrv_qed_co_truncate()
1492 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_getlength()
1493 return s->header.image_size; in bdrv_qed_co_getlength()
1499 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_get_info()
1502 bdi->cluster_size = s->header.cluster_size; in bdrv_qed_co_get_info()
1503 bdi->is_dirty = s->header.features & QED_F_NEED_CHECK; in bdrv_qed_co_get_info()
1511 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_change_backing_file()
1522 if (backing_file && (s->header.compat_features & in bdrv_qed_co_change_backing_file()
1524 return -ENOTSUP; in bdrv_qed_co_change_backing_file()
1527 memcpy(&new_header, &s->header, sizeof(new_header)); in bdrv_qed_co_change_backing_file()
1555 return -ENOSPC; in bdrv_qed_co_change_backing_file()
1571 ret = bdrv_co_pwrite_sync(bs->file, 0, buffer_len, buffer, 0); in bdrv_qed_co_change_backing_file()
1574 memcpy(&s->header, &new_header, sizeof(new_header)); in bdrv_qed_co_change_backing_file()
1583 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_invalidate_cache()
1589 qemu_co_mutex_lock(&s->table_lock); in bdrv_qed_co_invalidate_cache()
1590 ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, errp); in bdrv_qed_co_invalidate_cache()
1591 qemu_co_mutex_unlock(&s->table_lock); in bdrv_qed_co_invalidate_cache()
1601 BDRVQEDState *s = bs->opaque; in bdrv_qed_co_check()
1604 qemu_co_mutex_lock(&s->table_lock); in bdrv_qed_co_check()
1606 qemu_co_mutex_unlock(&s->table_lock); in bdrv_qed_co_check()
1612 .name = "qed-create-opts",
1633 .help = "Cluster size (in bytes)",