Lines matching refs: iocb (QEMU hw/nvme/ctrl.c)

2486 NvmeDSMAIOCB *iocb = container_of(aiocb, NvmeDSMAIOCB, common); in nvme_dsm_cancel() local
2489 iocb->idx = iocb->nr; in nvme_dsm_cancel()
2490 iocb->ret = -ECANCELED; in nvme_dsm_cancel()
2492 if (iocb->aiocb) { in nvme_dsm_cancel()
2493 blk_aio_cancel_async(iocb->aiocb); in nvme_dsm_cancel()
2494 iocb->aiocb = NULL; in nvme_dsm_cancel()
2500 assert(iocb->idx == iocb->nr); in nvme_dsm_cancel()
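
The cancel handlers for DSM, Copy, Flush, Zone Reset and Format all share the shape shown above: recover the wrapper with container_of(), force the iteration to its end, record -ECANCELED, and cancel any inner request still in flight. A minimal sketch, assuming the NvmeDSMAIOCB layout implied by the fields referenced in this listing:

    typedef struct NvmeDSMAIOCB {
        BlockAIOCB common;      /* embedded AIOCB handed back to the core */
        BlockAIOCB *aiocb;      /* inner request currently in flight, if any */
        NvmeRequest *req;
        int ret;

        NvmeDsmRange *range;    /* ranges copied from the guest */
        unsigned int nr;        /* number of ranges */
        unsigned int idx;       /* next range to process */
    } NvmeDSMAIOCB;

    static void nvme_dsm_cancel(BlockAIOCB *aiocb)
    {
        NvmeDSMAIOCB *iocb = container_of(aiocb, NvmeDSMAIOCB, common);

        /* stop further iteration and mark the command as cancelled */
        iocb->idx = iocb->nr;
        iocb->ret = -ECANCELED;

        if (iocb->aiocb) {
            blk_aio_cancel_async(iocb->aiocb);
            iocb->aiocb = NULL;
        }
    }
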
2513 NvmeDSMAIOCB *iocb = opaque; in nvme_dsm_md_cb() local
2514 NvmeRequest *req = iocb->req; in nvme_dsm_md_cb()
2520 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { in nvme_dsm_md_cb()
2524 range = &iocb->range[iocb->idx - 1]; in nvme_dsm_md_cb()
2539 nvme_dsm_cb(iocb, 0); in nvme_dsm_md_cb()
2543 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, nvme_moff(ns, slba), in nvme_dsm_md_cb()
2545 nvme_dsm_cb, iocb); in nvme_dsm_md_cb()
2549 nvme_dsm_cb(iocb, ret); in nvme_dsm_md_cb()
2554 NvmeDSMAIOCB *iocb = opaque; in nvme_dsm_cb() local
2555 NvmeRequest *req = iocb->req; in nvme_dsm_cb()
2562 if (iocb->ret < 0) { in nvme_dsm_cb()
2565 iocb->ret = ret; in nvme_dsm_cb()
2570 if (iocb->idx == iocb->nr) { in nvme_dsm_cb()
2574 range = &iocb->range[iocb->idx++]; in nvme_dsm_cb()
2591 iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba), in nvme_dsm_cb()
2593 nvme_dsm_md_cb, iocb); in nvme_dsm_cb()
2597 iocb->aiocb = NULL; in nvme_dsm_cb()
2598 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_dsm_cb()
2599 g_free(iocb->range); in nvme_dsm_cb()
2600 qemu_aio_unref(iocb); in nvme_dsm_cb()
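
Taken together, nvme_dsm_cb() and nvme_dsm_md_cb() form a small state machine: each pass discards the data blocks of one range, then nvme_dsm_md_cb() zeroes the matching metadata (when the namespace has any) and re-enters nvme_dsm_cb() for the next range. A condensed sketch of the driving callback, with the per-range bounds checks and tracing elided:

    static void nvme_dsm_md_cb(void *opaque, int ret);

    static void nvme_dsm_cb(void *opaque, int ret)
    {
        NvmeDSMAIOCB *iocb = opaque;
        NvmeRequest *req = iocb->req;
        NvmeNamespace *ns = req->ns;
        NvmeDsmRange *range;
        uint64_t slba;
        uint32_t nlb;

        if (iocb->ret < 0) {
            goto done;                    /* already failed or cancelled */
        } else if (ret < 0) {
            iocb->ret = ret;              /* record the first error */
            goto done;
        }

        if (iocb->idx == iocb->nr) {
            goto done;                    /* all ranges consumed */
        }

        range = &iocb->range[iocb->idx++];
        slba = le64_to_cpu(range->slba);
        nlb = le32_to_cpu(range->nlb);

        /* discard the data blocks; nvme_dsm_md_cb() then zeroes the
         * metadata area (if any) and re-enters nvme_dsm_cb() */
        iocb->aiocb = blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, slba),
                                       nvme_l2b(ns, nlb),
                                       nvme_dsm_md_cb, iocb);
        return;

    done:
        iocb->aiocb = NULL;
        iocb->common.cb(iocb->common.opaque, iocb->ret);
        g_free(iocb->range);
        qemu_aio_unref(iocb);
    }
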
2614 NvmeDSMAIOCB *iocb = blk_aio_get(&nvme_dsm_aiocb_info, ns->blkconf.blk, in nvme_dsm() local
2617 iocb->req = req; in nvme_dsm()
2618 iocb->ret = 0; in nvme_dsm()
2619 iocb->range = g_new(NvmeDsmRange, nr); in nvme_dsm()
2620 iocb->nr = nr; in nvme_dsm()
2621 iocb->idx = 0; in nvme_dsm()
2623 status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, in nvme_dsm()
2626 g_free(iocb->range); in nvme_dsm()
2627 qemu_aio_unref(iocb); in nvme_dsm()
2632 req->aiocb = &iocb->common; in nvme_dsm()
2633 nvme_dsm_cb(iocb, 0); in nvme_dsm()
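
nvme_dsm() ties the pieces together: it allocates the wrapper with blk_aio_get(), copies the range list from guest memory with nvme_h2c(), publishes &iocb->common through req->aiocb so the command can later be cancelled, and kicks the loop by calling nvme_dsm_cb(iocb, 0) directly. The glue between blk_aio_get()/blk_aio_cancel_async() and nvme_dsm_cancel() is the per-command AIOCBInfo; an assumed initializer, consistent with QEMU's AIOCBInfo API:

    static const AIOCBInfo nvme_dsm_aiocb_info = {
        .aiocb_size   = sizeof(NvmeDSMAIOCB),
        .cancel_async = nvme_dsm_cancel,
    };

blk_aio_get() allocates aiocb_size bytes and fills in the embedded BlockAIOCB, which is why the completion call iocb->common.cb(iocb->common.opaque, iocb->ret) reaches the callback registered at allocation time.
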
2729 NvmeCopyAIOCB *iocb = container_of(aiocb, NvmeCopyAIOCB, common); in nvme_copy_cancel() local
2731 iocb->ret = -ECANCELED; in nvme_copy_cancel()
2733 if (iocb->aiocb) { in nvme_copy_cancel()
2734 blk_aio_cancel_async(iocb->aiocb); in nvme_copy_cancel()
2735 iocb->aiocb = NULL; in nvme_copy_cancel()
2744 static void nvme_copy_done(NvmeCopyAIOCB *iocb) in nvme_copy_done() argument
2746 NvmeRequest *req = iocb->req; in nvme_copy_done()
2750 if (iocb->idx != iocb->nr) { in nvme_copy_done()
2751 req->cqe.result = cpu_to_le32(iocb->idx); in nvme_copy_done()
2754 qemu_iovec_destroy(&iocb->iov); in nvme_copy_done()
2755 g_free(iocb->bounce); in nvme_copy_done()
2757 if (iocb->ret < 0) { in nvme_copy_done()
2758 block_acct_failed(stats, &iocb->acct.read); in nvme_copy_done()
2759 block_acct_failed(stats, &iocb->acct.write); in nvme_copy_done()
2761 block_acct_done(stats, &iocb->acct.read); in nvme_copy_done()
2762 block_acct_done(stats, &iocb->acct.write); in nvme_copy_done()
2765 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_copy_done()
2766 qemu_aio_unref(iocb); in nvme_copy_done()
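
nvme_copy_done() settles the block accounting opened when each leg of the copy was submitted (block_acct_start() appears later in this listing, in nvme_do_copy() for the read and nvme_copy_in_completed_cb() for the write), and a partial copy is reported by storing the number of completed ranges in cqe.result. A sketch of the accounting pairing, using a hypothetical helper and the acct.read/acct.write cookies assumed from the usage above:

    static void nvme_copy_settle_accounting_sketch(NvmeCopyAIOCB *iocb,
                                                   BlockAcctStats *stats)
    {
        if (iocb->ret < 0) {
            block_acct_failed(stats, &iocb->acct.read);
            block_acct_failed(stats, &iocb->acct.write);
        } else {
            block_acct_done(stats, &iocb->acct.read);
            block_acct_done(stats, &iocb->acct.write);
        }
    }
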
2769 static void nvme_do_copy(NvmeCopyAIOCB *iocb);
2872 NvmeCopyAIOCB *iocb, uint16_t nr) in nvme_check_copy_mcl() argument
2878 nvme_copy_source_range_parse(iocb->ranges, idx, iocb->format, NULL, in nvme_check_copy_mcl()
2882 iocb->tcl = copy_len; in nvme_check_copy_mcl()
2892 NvmeCopyAIOCB *iocb = opaque; in nvme_copy_out_completed_cb() local
2893 NvmeRequest *req = iocb->req; in nvme_copy_out_completed_cb()
2897 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, in nvme_copy_out_completed_cb()
2901 iocb->ret = ret; in nvme_copy_out_completed_cb()
2903 } else if (iocb->ret < 0) { in nvme_copy_out_completed_cb()
2908 nvme_advance_zone_wp(dns, iocb->zone, nlb); in nvme_copy_out_completed_cb()
2911 iocb->idx++; in nvme_copy_out_completed_cb()
2912 iocb->slba += nlb; in nvme_copy_out_completed_cb()
2914 nvme_do_copy(iocb); in nvme_copy_out_completed_cb()
2919 NvmeCopyAIOCB *iocb = opaque; in nvme_copy_out_cb() local
2920 NvmeRequest *req = iocb->req; in nvme_copy_out_cb()
2926 if (ret < 0 || iocb->ret < 0 || !dns->lbaf.ms) { in nvme_copy_out_cb()
2930 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, NULL, in nvme_copy_out_cb()
2934 mbounce = iocb->bounce + nvme_l2b(dns, nlb); in nvme_copy_out_cb()
2936 qemu_iovec_reset(&iocb->iov); in nvme_copy_out_cb()
2937 qemu_iovec_add(&iocb->iov, mbounce, mlen); in nvme_copy_out_cb()
2939 iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_moff(dns, iocb->slba), in nvme_copy_out_cb()
2940 &iocb->iov, 0, nvme_copy_out_completed_cb, in nvme_copy_out_cb()
2941 iocb); in nvme_copy_out_cb()
2946 nvme_copy_out_completed_cb(iocb, ret); in nvme_copy_out_cb()
2951 NvmeCopyAIOCB *iocb = opaque; in nvme_copy_in_completed_cb() local
2952 NvmeRequest *req = iocb->req; in nvme_copy_in_completed_cb()
2953 NvmeNamespace *sns = iocb->sns; in nvme_copy_in_completed_cb()
2965 iocb->ret = ret; in nvme_copy_in_completed_cb()
2967 } else if (iocb->ret < 0) { in nvme_copy_in_completed_cb()
2971 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, in nvme_copy_in_completed_cb()
2974 trace_pci_nvme_copy_out(iocb->slba, nlb); in nvme_copy_in_completed_cb()
2984 mbounce = iocb->bounce + nvme_l2b(sns, nlb); in nvme_copy_in_completed_cb()
2990 status = nvme_dif_check(sns, iocb->bounce, len, mbounce, mlen, prinfor, in nvme_copy_in_completed_cb()
3002 mbounce = iocb->bounce + nvme_l2b(dns, nlb); in nvme_copy_in_completed_cb()
3008 status = nvme_check_prinfo(dns, prinfow, iocb->slba, iocb->reftag); in nvme_copy_in_completed_cb()
3013 nvme_dif_pract_generate_dif(dns, iocb->bounce, len, mbounce, mlen, in nvme_copy_in_completed_cb()
3014 apptag, &iocb->reftag); in nvme_copy_in_completed_cb()
3016 status = nvme_dif_check(dns, iocb->bounce, len, mbounce, mlen, in nvme_copy_in_completed_cb()
3017 prinfow, iocb->slba, apptag, appmask, in nvme_copy_in_completed_cb()
3018 &iocb->reftag); in nvme_copy_in_completed_cb()
3025 status = nvme_check_bounds(dns, iocb->slba, nlb); in nvme_copy_in_completed_cb()
3031 status = nvme_check_zone_write(dns, iocb->zone, iocb->slba, nlb); in nvme_copy_in_completed_cb()
3036 if (!(iocb->zone->d.za & NVME_ZA_ZRWA_VALID)) { in nvme_copy_in_completed_cb()
3037 iocb->zone->w_ptr += nlb; in nvme_copy_in_completed_cb()
3041 qemu_iovec_reset(&iocb->iov); in nvme_copy_in_completed_cb()
3042 qemu_iovec_add(&iocb->iov, iocb->bounce, len); in nvme_copy_in_completed_cb()
3044 block_acct_start(blk_get_stats(dns->blkconf.blk), &iocb->acct.write, 0, in nvme_copy_in_completed_cb()
3047 iocb->aiocb = blk_aio_pwritev(dns->blkconf.blk, nvme_l2b(dns, iocb->slba), in nvme_copy_in_completed_cb()
3048 &iocb->iov, 0, nvme_copy_out_cb, iocb); in nvme_copy_in_completed_cb()
3054 iocb->ret = -1; in nvme_copy_in_completed_cb()
3056 nvme_do_copy(iocb); in nvme_copy_in_completed_cb()
3061 NvmeCopyAIOCB *iocb = opaque; in nvme_copy_in_cb() local
3062 NvmeNamespace *sns = iocb->sns; in nvme_copy_in_cb()
3066 if (ret < 0 || iocb->ret < 0 || !sns->lbaf.ms) { in nvme_copy_in_cb()
3070 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, &slba, in nvme_copy_in_cb()
3073 qemu_iovec_reset(&iocb->iov); in nvme_copy_in_cb()
3074 qemu_iovec_add(&iocb->iov, iocb->bounce + nvme_l2b(sns, nlb), in nvme_copy_in_cb()
3077 iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_moff(sns, slba), in nvme_copy_in_cb()
3078 &iocb->iov, 0, nvme_copy_in_completed_cb, in nvme_copy_in_cb()
3079 iocb); in nvme_copy_in_cb()
3083 nvme_copy_in_completed_cb(iocb, ret); in nvme_copy_in_cb()
3144 static void nvme_do_copy(NvmeCopyAIOCB *iocb) in nvme_do_copy() argument
3146 NvmeRequest *req = iocb->req; in nvme_do_copy()
3159 if (iocb->ret < 0) { in nvme_do_copy()
3163 if (iocb->idx == iocb->nr) { in nvme_do_copy()
3167 if (iocb->format == 2 || iocb->format == 3) { in nvme_do_copy()
3168 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, in nvme_do_copy()
3172 !nvme_nsid_valid(iocb->n, snsid)) { in nvme_do_copy()
3176 iocb->sns = nvme_ns(iocb->n, snsid); in nvme_do_copy()
3177 if (unlikely(!iocb->sns)) { in nvme_do_copy()
3182 if (((slba + nlb) > iocb->slba) && in nvme_do_copy()
3183 ((slba + nlb) < (iocb->slba + iocb->tcl))) { in nvme_do_copy()
3189 nvme_copy_source_range_parse(iocb->ranges, iocb->idx, iocb->format, in nvme_do_copy()
3193 sns = iocb->sns; in nvme_do_copy()
3274 g_free(iocb->bounce); in nvme_do_copy()
3275 iocb->bounce = g_malloc_n(le16_to_cpu(sns->id_ns.mssrl), in nvme_do_copy()
3278 qemu_iovec_reset(&iocb->iov); in nvme_do_copy()
3279 qemu_iovec_add(&iocb->iov, iocb->bounce, len); in nvme_do_copy()
3281 block_acct_start(blk_get_stats(sns->blkconf.blk), &iocb->acct.read, 0, in nvme_do_copy()
3284 iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_l2b(sns, slba), in nvme_do_copy()
3285 &iocb->iov, 0, nvme_copy_in_cb, iocb); in nvme_do_copy()
3290 iocb->ret = -1; in nvme_do_copy()
3292 nvme_copy_done(iocb); in nvme_do_copy()
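
Between nvme_do_copy() and nvme_copy_out_completed_cb(), each source range travels through a five-step callback chain. A condensed sketch of one iteration, using a hypothetical helper; slba and nlb stand for the values parsed from the current source range by nvme_copy_source_range_parse(), and the DIF, bounds and zoned-namespace checks are elided:

    static void nvme_copy_iteration_sketch(NvmeCopyAIOCB *iocb, uint64_t slba,
                                           uint32_t nlb)
    {
        NvmeNamespace *sns = iocb->sns;    /* source namespace */

        /* 1. read the source data blocks into the bounce buffer ... */
        qemu_iovec_reset(&iocb->iov);
        qemu_iovec_add(&iocb->iov, iocb->bounce, nvme_l2b(sns, nlb));
        iocb->aiocb = blk_aio_preadv(sns->blkconf.blk, nvme_l2b(sns, slba),
                                     &iocb->iov, 0, nvme_copy_in_cb, iocb);

        /* 2. nvme_copy_in_cb() reads the source metadata into
         *    iocb->bounce + nvme_l2b(sns, nlb);
         * 3. nvme_copy_in_completed_cb() runs the DIF, bounds and zone
         *    checks, then writes the data to the destination at iocb->slba;
         * 4. nvme_copy_out_cb() writes the destination metadata;
         * 5. nvme_copy_out_completed_cb() advances iocb->idx and iocb->slba
         *    and re-enters nvme_do_copy() for the next source range. */
    }
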
3299 NvmeCopyAIOCB *iocb = blk_aio_get(&nvme_copy_aiocb_info, ns->blkconf.blk, in nvme_copy() local
3309 iocb->ranges = NULL; in nvme_copy()
3310 iocb->zone = NULL; in nvme_copy()
3335 iocb->format = format; in nvme_copy()
3336 iocb->ranges = g_malloc_n(nr, len); in nvme_copy()
3337 status = nvme_h2c(n, (uint8_t *)iocb->ranges, len * nr, req); in nvme_copy()
3342 iocb->slba = le64_to_cpu(copy->sdlba); in nvme_copy()
3345 iocb->zone = nvme_get_zone_by_slba(ns, iocb->slba); in nvme_copy()
3346 if (!iocb->zone) { in nvme_copy()
3351 status = nvme_zrm_auto(n, ns, iocb->zone); in nvme_copy()
3357 status = nvme_check_copy_mcl(ns, iocb, nr); in nvme_copy()
3362 iocb->req = req; in nvme_copy()
3363 iocb->ret = 0; in nvme_copy()
3364 iocb->nr = nr; in nvme_copy()
3365 iocb->idx = 0; in nvme_copy()
3366 iocb->reftag = le32_to_cpu(copy->reftag); in nvme_copy()
3367 iocb->reftag |= (uint64_t)le32_to_cpu(copy->cdw3) << 32; in nvme_copy()
3369 qemu_iovec_init(&iocb->iov, 1); in nvme_copy()
3371 req->aiocb = &iocb->common; in nvme_copy()
3372 iocb->sns = req->ns; in nvme_copy()
3373 iocb->n = n; in nvme_copy()
3374 iocb->bounce = NULL; in nvme_copy()
3375 nvme_do_copy(iocb); in nvme_copy()
3380 g_free(iocb->ranges); in nvme_copy()
3381 qemu_aio_unref(iocb); in nvme_copy()
3460 NvmeFlushAIOCB *iocb = container_of(acb, NvmeFlushAIOCB, common); in nvme_flush_cancel() local
3462 iocb->ret = -ECANCELED; in nvme_flush_cancel()
3464 if (iocb->aiocb) { in nvme_flush_cancel()
3465 blk_aio_cancel_async(iocb->aiocb); in nvme_flush_cancel()
3466 iocb->aiocb = NULL; in nvme_flush_cancel()
3475 static void nvme_do_flush(NvmeFlushAIOCB *iocb);
3479 NvmeFlushAIOCB *iocb = opaque; in nvme_flush_ns_cb() local
3480 NvmeNamespace *ns = iocb->ns; in nvme_flush_ns_cb()
3483 iocb->ret = ret; in nvme_flush_ns_cb()
3485 } else if (iocb->ret < 0) { in nvme_flush_ns_cb()
3490 trace_pci_nvme_flush_ns(iocb->nsid); in nvme_flush_ns_cb()
3492 iocb->ns = NULL; in nvme_flush_ns_cb()
3493 iocb->aiocb = blk_aio_flush(ns->blkconf.blk, nvme_flush_ns_cb, iocb); in nvme_flush_ns_cb()
3498 nvme_do_flush(iocb); in nvme_flush_ns_cb()
3501 static void nvme_do_flush(NvmeFlushAIOCB *iocb) in nvme_do_flush() argument
3503 NvmeRequest *req = iocb->req; in nvme_do_flush()
3507 if (iocb->ret < 0) { in nvme_do_flush()
3511 if (iocb->broadcast) { in nvme_do_flush()
3512 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { in nvme_do_flush()
3513 iocb->ns = nvme_ns(n, i); in nvme_do_flush()
3514 if (iocb->ns) { in nvme_do_flush()
3515 iocb->nsid = i; in nvme_do_flush()
3521 if (!iocb->ns) { in nvme_do_flush()
3525 nvme_flush_ns_cb(iocb, 0); in nvme_do_flush()
3529 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_do_flush()
3530 qemu_aio_unref(iocb); in nvme_do_flush()
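
nvme_do_flush() and nvme_flush_ns_cb() ping-pong in the same way: for a broadcast flush, the loop walks the namespace table and flushes one attached namespace per round trip until none remain. A condensed sketch using a hypothetical helper name, and assuming the nvme_ctrl() accessor hw/nvme uses to get from a request back to its controller:

    static void nvme_do_flush_sketch(NvmeFlushAIOCB *iocb)
    {
        NvmeRequest *req = iocb->req;
        NvmeCtrl *n = nvme_ctrl(req);    /* assumption: request-to-controller helper */
        int i;

        if (iocb->ret < 0) {
            goto done;
        }

        if (iocb->broadcast) {
            /* find the next attached namespace after the one just flushed */
            for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) {
                iocb->ns = nvme_ns(n, i);
                if (iocb->ns) {
                    iocb->nsid = i;
                    break;
                }
            }
        }

        if (!iocb->ns) {
            goto done;                   /* nothing left to flush */
        }

        /* nvme_flush_ns_cb() issues blk_aio_flush() for iocb->ns and
         * re-enters nvme_do_flush() once that flush completes */
        nvme_flush_ns_cb(iocb, 0);
        return;

    done:
        iocb->common.cb(iocb->common.opaque, iocb->ret);
        qemu_aio_unref(iocb);
    }
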
3535 NvmeFlushAIOCB *iocb; in nvme_flush() local
3539 iocb = qemu_aio_get(&nvme_flush_aiocb_info, NULL, nvme_misc_cb, req); in nvme_flush()
3541 iocb->req = req; in nvme_flush()
3542 iocb->ret = 0; in nvme_flush()
3543 iocb->ns = NULL; in nvme_flush()
3544 iocb->nsid = 0; in nvme_flush()
3545 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); in nvme_flush()
3547 if (!iocb->broadcast) { in nvme_flush()
3553 iocb->ns = nvme_ns(n, nsid); in nvme_flush()
3554 if (!iocb->ns) { in nvme_flush()
3559 iocb->nsid = nsid; in nvme_flush()
3562 req->aiocb = &iocb->common; in nvme_flush()
3563 nvme_do_flush(iocb); in nvme_flush()
3568 qemu_aio_unref(iocb); in nvme_flush()
4048 NvmeZoneResetAIOCB *iocb = container_of(aiocb, NvmeZoneResetAIOCB, common); in nvme_zone_reset_cancel() local
4049 NvmeRequest *req = iocb->req; in nvme_zone_reset_cancel()
4052 iocb->idx = ns->num_zones; in nvme_zone_reset_cancel()
4054 iocb->ret = -ECANCELED; in nvme_zone_reset_cancel()
4056 if (iocb->aiocb) { in nvme_zone_reset_cancel()
4057 blk_aio_cancel_async(iocb->aiocb); in nvme_zone_reset_cancel()
4058 iocb->aiocb = NULL; in nvme_zone_reset_cancel()
4071 NvmeZoneResetAIOCB *iocb = opaque; in nvme_zone_reset_epilogue_cb() local
4072 NvmeRequest *req = iocb->req; in nvme_zone_reset_epilogue_cb()
4077 if (ret < 0 || iocb->ret < 0 || !ns->lbaf.ms) { in nvme_zone_reset_epilogue_cb()
4081 moff = nvme_moff(ns, iocb->zone->d.zslba); in nvme_zone_reset_epilogue_cb()
4084 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, moff, count, in nvme_zone_reset_epilogue_cb()
4086 nvme_zone_reset_cb, iocb); in nvme_zone_reset_epilogue_cb()
4090 nvme_zone_reset_cb(iocb, ret); in nvme_zone_reset_epilogue_cb()
4095 NvmeZoneResetAIOCB *iocb = opaque; in nvme_zone_reset_cb() local
4096 NvmeRequest *req = iocb->req; in nvme_zone_reset_cb()
4099 if (iocb->ret < 0) { in nvme_zone_reset_cb()
4102 iocb->ret = ret; in nvme_zone_reset_cb()
4106 if (iocb->zone) { in nvme_zone_reset_cb()
4107 nvme_zrm_reset(ns, iocb->zone); in nvme_zone_reset_cb()
4109 if (!iocb->all) { in nvme_zone_reset_cb()
4114 while (iocb->idx < ns->num_zones) { in nvme_zone_reset_cb()
4115 NvmeZone *zone = &ns->zone_array[iocb->idx++]; in nvme_zone_reset_cb()
4119 if (!iocb->all) { in nvme_zone_reset_cb()
4129 iocb->zone = zone; in nvme_zone_reset_cb()
4138 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, in nvme_zone_reset_cb()
4143 iocb); in nvme_zone_reset_cb()
4148 iocb->aiocb = NULL; in nvme_zone_reset_cb()
4150 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_zone_reset_cb()
4151 qemu_aio_unref(iocb); in nvme_zone_reset_cb()
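
The zone reset path iterates the zone array: each pass applies nvme_zrm_reset() to the zone finished on the previous pass, then picks the next zone to wipe and zeroes its data, with nvme_zone_reset_epilogue_cb() zeroing the metadata before re-entering this callback. A condensed sketch using a hypothetical helper name; the zone-state filtering, the write-zeroes length and the BDRV_REQ_MAY_UNMAP flag are assumptions:

    static void nvme_zone_reset_cb_sketch(NvmeZoneResetAIOCB *iocb, int ret)
    {
        NvmeNamespace *ns = iocb->req->ns;

        if (iocb->ret < 0) {
            goto done;
        } else if (ret < 0) {
            iocb->ret = ret;
            goto done;
        }

        if (iocb->zone) {
            /* previous zone's data and metadata are zeroed; reset its state */
            nvme_zrm_reset(ns, iocb->zone);

            if (!iocb->all) {
                goto done;               /* single-zone reset is finished */
            }
        }

        while (iocb->idx < ns->num_zones) {
            NvmeZone *zone = &ns->zone_array[iocb->idx++];

            /* ... skip zones that are not in a resettable state ... */

            iocb->zone = zone;
            iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk,
                                                nvme_l2b(ns, zone->d.zslba),
                                                nvme_l2b(ns, ns->zone_size),
                                                BDRV_REQ_MAY_UNMAP,
                                                nvme_zone_reset_epilogue_cb,
                                                iocb);
            return;
        }

    done:
        iocb->aiocb = NULL;
        iocb->common.cb(iocb->common.opaque, iocb->ret);
        qemu_aio_unref(iocb);
    }
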
4197 NvmeZoneResetAIOCB *iocb; in nvme_zone_mgmt_send() local
4252 iocb = blk_aio_get(&nvme_zone_reset_aiocb_info, ns->blkconf.blk, in nvme_zone_mgmt_send()
4255 iocb->req = req; in nvme_zone_mgmt_send()
4256 iocb->ret = 0; in nvme_zone_mgmt_send()
4257 iocb->all = all; in nvme_zone_mgmt_send()
4258 iocb->idx = zone_idx; in nvme_zone_mgmt_send()
4259 iocb->zone = NULL; in nvme_zone_mgmt_send()
4261 req->aiocb = &iocb->common; in nvme_zone_mgmt_send()
4262 nvme_zone_reset_cb(iocb, 0); in nvme_zone_mgmt_send()
6698 NvmeFormatAIOCB *iocb = container_of(aiocb, NvmeFormatAIOCB, common); in nvme_format_cancel() local
6700 iocb->ret = -ECANCELED; in nvme_format_cancel()
6702 if (iocb->aiocb) { in nvme_format_cancel()
6703 blk_aio_cancel_async(iocb->aiocb); in nvme_format_cancel()
6704 iocb->aiocb = NULL; in nvme_format_cancel()
6727 static void nvme_do_format(NvmeFormatAIOCB *iocb);
6731 NvmeFormatAIOCB *iocb = opaque; in nvme_format_ns_cb() local
6732 NvmeNamespace *ns = iocb->ns; in nvme_format_ns_cb()
6735 if (iocb->ret < 0) { in nvme_format_ns_cb()
6738 iocb->ret = ret; in nvme_format_ns_cb()
6744 if (iocb->offset < ns->size) { in nvme_format_ns_cb()
6745 bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset); in nvme_format_ns_cb()
6747 iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset, in nvme_format_ns_cb()
6749 nvme_format_ns_cb, iocb); in nvme_format_ns_cb()
6751 iocb->offset += bytes; in nvme_format_ns_cb()
6755 nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil); in nvme_format_ns_cb()
6757 iocb->ns = NULL; in nvme_format_ns_cb()
6758 iocb->offset = 0; in nvme_format_ns_cb()
6761 nvme_do_format(iocb); in nvme_format_ns_cb()
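
nvme_format_ns_cb() zeroes the namespace in chunks of at most BDRV_REQUEST_MAX_BYTES and only switches the LBA format with nvme_format_set() once the whole image has been written, before handing control back to nvme_do_format() for the next namespace. A condensed sketch using a hypothetical helper name; the flags argument and the clearing of ns->status are assumptions:

    static void nvme_format_ns_cb_sketch(NvmeFormatAIOCB *iocb, int ret)
    {
        NvmeNamespace *ns = iocb->ns;
        int64_t bytes;

        if (iocb->ret < 0) {
            goto done;
        } else if (ret < 0) {
            iocb->ret = ret;
            goto done;
        }

        if (iocb->offset < ns->size) {
            bytes = MIN(BDRV_REQUEST_MAX_BYTES, ns->size - iocb->offset);

            iocb->aiocb = blk_aio_pwrite_zeroes(ns->blkconf.blk, iocb->offset,
                                                bytes, BDRV_REQ_MAY_UNMAP,
                                                nvme_format_ns_cb, iocb);

            iocb->offset += bytes;
            return;
        }

        /* whole image zeroed: apply the new LBA format and clear the
         * FORMAT IN PROGRESS status set by nvme_do_format() (assumed) */
        nvme_format_set(ns, iocb->lbaf, iocb->mset, iocb->pi, iocb->pil);
        ns->status = 0x0;

        iocb->ns = NULL;
        iocb->offset = 0;

    done:
        /* pick the next namespace (broadcast) or finish the command */
        nvme_do_format(iocb);
    }
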
6785 static void nvme_do_format(NvmeFormatAIOCB *iocb) in nvme_do_format() argument
6787 NvmeRequest *req = iocb->req; in nvme_do_format()
6795 if (iocb->ret < 0) { in nvme_do_format()
6799 if (iocb->broadcast) { in nvme_do_format()
6800 for (i = iocb->nsid + 1; i <= NVME_MAX_NAMESPACES; i++) { in nvme_do_format()
6801 iocb->ns = nvme_ns(n, i); in nvme_do_format()
6802 if (iocb->ns) { in nvme_do_format()
6803 iocb->nsid = i; in nvme_do_format()
6809 if (!iocb->ns) { in nvme_do_format()
6813 status = nvme_format_check(iocb->ns, lbaf, pi); in nvme_do_format()
6819 iocb->ns->status = NVME_FORMAT_IN_PROGRESS; in nvme_do_format()
6820 nvme_format_ns_cb(iocb, 0); in nvme_do_format()
6824 iocb->common.cb(iocb->common.opaque, iocb->ret); in nvme_do_format()
6825 qemu_aio_unref(iocb); in nvme_do_format()
6830 NvmeFormatAIOCB *iocb; in nvme_format() local
6840 iocb = qemu_aio_get(&nvme_format_aiocb_info, NULL, nvme_misc_cb, req); in nvme_format()
6842 iocb->req = req; in nvme_format()
6843 iocb->ret = 0; in nvme_format()
6844 iocb->ns = NULL; in nvme_format()
6845 iocb->nsid = 0; in nvme_format()
6846 iocb->lbaf = lbaf; in nvme_format()
6847 iocb->mset = mset; in nvme_format()
6848 iocb->pi = pi; in nvme_format()
6849 iocb->pil = pil; in nvme_format()
6850 iocb->broadcast = (nsid == NVME_NSID_BROADCAST); in nvme_format()
6851 iocb->offset = 0; in nvme_format()
6854 iocb->lbaf |= lbafu << 4; in nvme_format()
6857 if (!iocb->broadcast) { in nvme_format()
6863 iocb->ns = nvme_ns(n, nsid); in nvme_format()
6864 if (!iocb->ns) { in nvme_format()
6870 req->aiocb = &iocb->common; in nvme_format()
6871 nvme_do_format(iocb); in nvme_format()
6876 qemu_aio_unref(iocb); in nvme_format()
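
Format, like Flush, allocates its AIOCB with qemu_aio_get() and a NULL BlockDriverState because a broadcast operation walks every attached namespace rather than being bound to one block device, whereas DSM, Copy and Zone Reset use blk_aio_get() on the namespace's BlockBackend. The cancel hook is wired through the same mechanism in every case; an assumed initializer for the format variant, mirroring the one sketched for DSM above:

    static const AIOCBInfo nvme_format_aiocb_info = {
        .aiocb_size   = sizeof(NvmeFormatAIOCB),
        .cancel_async = nvme_format_cancel,
    };
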