Excerpts from QEMU's virtio-blk device model, hw/block/virtio-blk.c
/* ... See the COPYING file in the top-level directory. */

#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/qdev-properties.h"
#include "system/block-ram-registrar.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"

/* in virtio_blk_init_request() */
req->dev = s;
req->vq = vq;
req->qiov.size = 0;
req->in_len = 0;
req->next = NULL;
req->mr_next = NULL;
/* in virtio_blk_req_complete() */
VirtIOBlock *s = req->dev;
...
stb_p(&req->in->status, status);
iov_discard_undo(&req->inhdr_undo);
iov_discard_undo(&req->outhdr_undo);
virtqueue_push(req->vq, &req->elem, req->in_len);
...
virtio_notify_irqfd(vdev, req->vq);
...
virtio_notify(vdev, req->vq);
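
The status byte that virtio_blk_req_complete() stores with stb_p() is the last byte of the guest's device-writable ("in") buffers. A standalone sketch of that layout, using only POSIX iovecs (nothing here is QEMU API; names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

#define VIRTIO_BLK_S_OK 0

int main(void)
{
    uint8_t data[512];                 /* device-writable payload */
    uint8_t inhdr[1];                  /* struct virtio_blk_inhdr { u8 status; } */
    struct iovec in_sg[] = {
        { .iov_base = data,  .iov_len = sizeof(data)  },
        { .iov_base = inhdr, .iov_len = sizeof(inhdr) },
    };
    unsigned in_num = 2;

    /* The status byte sits at the very end of the last "in" segment. */
    uint8_t *status = (uint8_t *)in_sg[in_num - 1].iov_base
                      + in_sg[in_num - 1].iov_len - 1;
    *status = VIRTIO_BLK_S_OK;

    printf("status=%u\n", inhdr[0]);
    return 0;
}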
/* in virtio_blk_handle_rw_error() */
VirtIOBlock *s = req->dev;
BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
...
/* Break the link as the next request is going to be parsed from the
 * ring again. Otherwise we may end up doing a double completion! */
req->mr_next = NULL;

WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
    req->next = s->rq;
    s->rq = req;
}
...
block_acct_failed(blk_get_stats(s->blk), &req->acct);
...
blk_error_action(s->blk, action, is_read, error);
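
blk_get_error_action() maps a failed request to report/ignore/stop according to the drive's rerror/werror policy; stopped requests are parked on s->rq and resubmitted after the VM resumes. A minimal model of that decision, with invented enum names (not QEMU's types):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for QEMU's BlockdevOnError policy and
 * BlockErrorAction result. */
enum action { ACT_REPORT, ACT_IGNORE, ACT_STOP };
enum policy { POLICY_REPORT, POLICY_IGNORE, POLICY_STOP, POLICY_ENOSPC };

static enum action pick_action(enum policy p, int error)
{
    switch (p) {
    case POLICY_IGNORE:
        return ACT_IGNORE;
    case POLICY_STOP:
        return ACT_STOP;
    case POLICY_ENOSPC:
        /* Pause the VM only when the host really ran out of space. */
        return error == ENOSPC ? ACT_STOP : ACT_REPORT;
    default:
        return ACT_REPORT;
    }
}

int main(void)
{
    printf("%d %d\n", pick_action(POLICY_ENOSPC, ENOSPC),  /* -> ACT_STOP */
           pick_action(POLICY_ENOSPC, EIO));               /* -> ACT_REPORT */
    return 0;
}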
/* in virtio_blk_rw_complete() */
VirtIOBlock *s = next->dev;
...
next = req->mr_next;
...
if (req->qiov.nalloc != -1) {
    /* If nalloc is != -1 req->qiov is a local copy of the original
     * external iovec. It was allocated in submit_requests to be
     * able to merge requests. */
    qemu_iovec_destroy(&req->qiov);
}
...
int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
...
/* ... While this is ugly,
 * it is acceptable because the device is free to write to
 * the guest memory we gave it; ordering is not a problem. */
if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
    ...
}
...
block_acct_done(blk_get_stats(s->blk), &req->acct);

/* in virtio_blk_flush_complete() */
VirtIOBlock *s = req->dev;

if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) {
    ...
}
...
block_acct_done(blk_get_stats(s->blk), &req->acct);

/* in virtio_blk_discard_write_zeroes_complete() */
VirtIOBlock *s = req->dev;
bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                        ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
    ...
}
...
block_acct_done(blk_get_stats(s->blk), &req->acct);

/* in virtio_blk_handle_scsi() */
VirtIOBlock *blk = req->dev;
...
VirtQueueElement *elem = &req->elem;
...
if (elem->out_num < 2 || elem->in_num < 3) {
    ...
}
...
/*
 * The scsi inhdr is placed in the second-to-last input segment, just
 * before the regular inhdr.
 */
scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
virtio_stl_p(vdev, &scsi->errors, 255);
/* in submit_requests() */
BlockBackend *blk = s->blk;
QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
int64_t sector_num = mrb->reqs[start]->sector_num;
bool is_write = mrb->is_write;
...
struct iovec *tmp_iov = qiov->iov;
int tmp_niov = qiov->niov;

/* mrb->reqs[start]->qiov was initialized from external so we can't
 * modify it here. We need to initialize it locally and then add the
 * external iovecs. */
qemu_iovec_init(qiov, niov);
...
qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                  mrb->reqs[i]->qiov.size);
mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
...
trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                 ...,
                                 qiov->size, is_write);
block_acct_merge_done(...,
                      num_reqs - 1);
...
if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
    ...
}
...
if (is_write) {
    blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, flags,
                    virtio_blk_rw_complete,
                    mrb->reqs[start]);
} else {
    blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, flags,
                   virtio_blk_rw_complete,
                   mrb->reqs[start]);
}

/* in multireq_compare() */
if (req1->sector_num > req2->sector_num) {
    return 1;
} else if (req1->sector_num < req2->sector_num) {
    return -1;
} else {
    return 0;
}

/* in virtio_blk_submit_multireq() */
if (mrb->num_reqs == 1) {
    submit_requests(s, mrb, 0, 1, -1);
    mrb->num_reqs = 0;
    return;
}

max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
      &multireq_compare);

for (i = 0; i < mrb->num_reqs; i++) {
    VirtIOBlockReq *req = mrb->reqs[i];
    ...
    if (sector_num + nb_sectors != req->sector_num ||
        niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
        req->qiov.size > max_transfer ||
        nb_sectors > (max_transfer -
                      req->qiov.size) / BDRV_SECTOR_SIZE) {
        ...
    }
    ...
    sector_num = req->sector_num;
    ...
    nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
    niov += req->qiov.niov;
}
...
mrb->num_reqs = 0;
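
virtio_blk_submit_multireq() sorts the batch by start sector, then coalesces runs that are exactly contiguous and stay under the backend's transfer and iovec limits. A self-contained sketch of the same heuristic (types and limits invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in; QEMU uses VirtIOBlockReq and BlockBackend limits. */
struct req { int64_t sector; int64_t nb_sectors; int niov; };

#define MAX_SECTORS 32   /* cap per merged request */
#define MAX_IOV     4

static int cmp_sector(const void *a, const void *b)
{
    const struct req *r1 = a, *r2 = b;
    return (r1->sector > r2->sector) - (r1->sector < r2->sector);
}

int main(void)
{
    struct req reqs[] = {
        { 16, 8, 1 }, { 0, 8, 1 }, { 8, 8, 1 }, { 64, 8, 1 },
    };
    size_t n = sizeof(reqs) / sizeof(reqs[0]);

    qsort(reqs, n, sizeof(reqs[0]), cmp_sector);

    /* Walk the sorted list, starting a new merged request whenever the
     * next one is not contiguous or would exceed a limit. */
    int64_t start = reqs[0].sector, len = reqs[0].nb_sectors;
    int niov = reqs[0].niov;
    for (size_t i = 1; i < n; i++) {
        if (start + len != reqs[i].sector ||
            len + reqs[i].nb_sectors > MAX_SECTORS ||
            niov + reqs[i].niov > MAX_IOV) {
            printf("submit sectors [%lld, +%lld)\n",
                   (long long)start, (long long)len);
            start = reqs[i].sector;
            len = 0;
            niov = 0;
        }
        len += reqs[i].nb_sectors;
        niov += reqs[i].niov;
    }
    printf("submit sectors [%lld, +%lld)\n", (long long)start, (long long)len);
    return 0;
}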
/* in virtio_blk_handle_flush() */
VirtIOBlock *s = req->dev;

block_acct_start(blk_get_stats(s->blk), &req->acct, 0, BLOCK_ACCT_FLUSH);

/*
 * Make sure all outstanding writes are posted to the backing device.
 */
if (mrb->is_write && mrb->num_reqs > 0) {
    virtio_blk_submit_multireq(s, mrb);
}
blk_aio_flush(s->blk, virtio_blk_flush_complete, req);

/* in virtio_blk_sect_range_ok() */
if (sector & dev->sector_mask) {
    return false;
}
if (size % dev->conf.conf.logical_block_size) {
    return false;
}
blk_get_geometry(dev->blk, &total_sectors);
if (sector > total_sectors || nb_sectors > total_sectors - sector) {
    return false;
}
return true;
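
virtio_blk_sect_range_ok() rejects requests that are misaligned, not a multiple of the logical block size, or out of bounds, with the bounds test written so that sector + nb_sectors cannot overflow. A runnable sketch with made-up geometry constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; QEMU derives these from the block configuration. */
#define BDRV_SECTOR_SIZE   512u
#define LOGICAL_BLOCK_SIZE 4096u
#define TOTAL_SECTORS      1000000ull

static bool sect_range_ok(uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> 9;
    uint64_t sector_mask = LOGICAL_BLOCK_SIZE / BDRV_SECTOR_SIZE - 1;

    if (sector & sector_mask) {          /* start not block-aligned */
        return false;
    }
    if (size % LOGICAL_BLOCK_SIZE) {     /* length not a block multiple */
        return false;
    }
    /* Ordered to avoid overflow in sector + nb_sectors. */
    if (sector > TOTAL_SECTORS || nb_sectors > TOTAL_SECTORS - sector) {
        return false;
    }
    return true;
}

int main(void)
{
    printf("%d %d\n", sect_range_ok(0, 4096),   /* ok */
           sect_range_ok(1, 4096));             /* misaligned */
    return 0;
}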
/* in virtio_blk_handle_discard_write_zeroes() */
VirtIOBlock *s = req->dev;
...
sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
              s->conf.max_discard_sectors;
...
/*
 * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
 * and write zeroes commands if any unknown flag is set.
 */
...
block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                 BLOCK_ACCT_WRITE);

blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                      bytes, flags,
                      virtio_blk_discard_write_zeroes_complete, req);
...
/*
 * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
 * discard commands if the unmap flag is set.
 */
...
blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                 virtio_blk_discard_write_zeroes_complete, req);
...
block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
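
For discard and write-zeroes, the header parsed out of the payload must itself be validated: the sector count against the configured maximum, and the flags field, where unmap is only meaningful for write-zeroes. A hedged sketch (the limit and the zero-length check are illustrative, not lifted from QEMU):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout of struct virtio_blk_discard_write_zeroes. */
struct dwz_hdr {
    uint64_t sector;
    uint32_t num_sectors;
    uint32_t flags;
};

#define DWZ_FLAG_UNMAP  (1u << 0)
#define MAX_DWZ_SECTORS 4096u   /* illustrative device limit */

/* Returns true if the request may proceed; VIRTIO requires rejecting
 * over-long requests, unknown flags, and unmap on plain discard. */
static bool dwz_ok(const struct dwz_hdr *h, bool is_write_zeroes)
{
    if (h->num_sectors == 0 || h->num_sectors > MAX_DWZ_SECTORS) {
        return false;
    }
    if (h->flags & ~DWZ_FLAG_UNMAP) {
        return false;   /* unknown flag set */
    }
    if (!is_write_zeroes && h->flags != 0) {
        return false;   /* unmap flag is only valid for write-zeroes */
    }
    return true;
}

int main(void)
{
    struct dwz_hdr h = { .sector = 0, .num_sectors = 8,
                         .flags = DWZ_FLAG_UNMAP };
    printf("wz=%d discard=%d\n", dwz_ok(&h, true), dwz_ok(&h, false));
    return 0;
}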
/* in check_zoned_request() */
BlockDriverState *bs = blk_bs(s->blk);
...
if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
    ...
}
if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
    || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
    ...
}
if (bs->bl.write_granularity) {
    if ((offset % bs->bl.write_granularity) != 0) {
        ...
    }
}
index = offset / bs->bl.zone_size;
if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
    ...
}
if (len / 512 > bs->bl.max_append_sectors) {
    if (bs->bl.max_append_sectors == 0) {
        ...
    }
}
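
check_zoned_request() derives the target zone index from the byte offset and bounds zone-append length by the device's append limit. A small sketch of those two calculations with invented limits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative zoned-device limits (QEMU reads these from BlockLimits). */
#define ZONE_SIZE          (256 * 1024 * 1024ull)  /* bytes */
#define WRITE_GRANULARITY  4096u
#define MAX_APPEND_SECTORS 1024u

static bool zone_append_ok(int64_t offset, int64_t len)
{
    if (offset % WRITE_GRANULARITY) {
        return false;                  /* unaligned append */
    }
    if (len / 512 > MAX_APPEND_SECTORS) {
        return false;                  /* longer than one append allows */
    }
    /* The zone index selects the write pointer that the append targets. */
    uint64_t index = (uint64_t)offset / ZONE_SIZE;
    printf("targets zone %llu\n", (unsigned long long)index);
    return true;
}

int main(void)
{
    printf("%d\n", zone_append_ok(ZONE_SIZE, 8192));
    return 0;
}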
/* in virtio_blk_zone_report_complete() */
VirtIOBlockReq *req = data->req;
VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
struct iovec *in_iov = data->in_iov;
unsigned in_num = data->in_num;
...
int64_t nz = data->zone_report_data.nr_zones;
...
.z_start = cpu_to_le64(data->zone_report_data.zones[j].start
                       >> BDRV_SECTOR_BITS),
.z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
                     >> BDRV_SECTOR_BITS),
.z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
                    >> BDRV_SECTOR_BITS),
...
switch (data->zone_report_data.zones[j].type) {
    ...
}
switch (data->zone_report_data.zones[j].state) {
    ...
}
...
g_free(data->zone_report_data.zones);

/* in virtio_blk_handle_zone_report() */
VirtIOBlock *s = req->dev;
...
if (req->in_len < sizeof(struct virtio_blk_inhdr) +
                  sizeof(struct virtio_blk_zone_report) +
                  sizeof(struct virtio_blk_zone_descriptor)) {
    ...
}
...
offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
...
nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
            sizeof(struct virtio_blk_zone_report)) /
           sizeof(struct virtio_blk_zone_descriptor);
...
data->req = req;
data->in_iov = in_iov;
data->in_num = in_num;
data->zone_report_data.nr_zones = nr_zones;
data->zone_report_data.zones = g_malloc(zone_size);

blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                    data->zone_report_data.zones,
                    virtio_blk_zone_report_complete, data);

/* in virtio_blk_zone_mgmt_complete() */
VirtIOBlock *s = req->dev;

/* in virtio_blk_handle_zone_mgmt() */
VirtIOBlock *s = req->dev;
...
BlockDriverState *bs = blk_bs(s->blk);
int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
...
uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
...
uint32_t type = virtio_ldl_p(vdev, &req->out.type);
...
                 bs->total_sectors);

if (bs->bl.zone_size > capacity - offset) {
    len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1ull);
} else {
    len = bs->bl.zone_size;
}
...
blk_aio_zone_mgmt(s->blk, op, offset, len,
                  virtio_blk_zone_mgmt_complete, req);

/* in virtio_blk_zone_append_complete() */
VirtIOBlockReq *req = data->req;
VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
...
virtio_stq_p(vdev, &append_sector,
             data->zone_append_data.offset >> BDRV_SECTOR_BITS);
n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                 sizeof(append_sector));
...

/* in virtio_blk_handle_zone_append() */
VirtIOBlock *s = req->dev;
...
int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
...
data->req = req;
data->in_iov = in_iov;
data->in_num = in_num;
data->zone_append_data.offset = offset;
qemu_iovec_init_external(&req->qiov, out_iov, out_num);

block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                 BLOCK_ACCT_ZONE_APPEND);

blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                    virtio_blk_zone_append_complete, data);
/* in virtio_blk_handle_request() */
struct iovec *in_iov = req->elem.in_sg;
struct iovec *out_iov = req->elem.out_sg;
unsigned in_num = req->elem.in_num;
unsigned out_num = req->elem.out_num;
VirtIOBlock *s = req->dev;
...
if (req->elem.out_num < 1 || req->elem.in_num < 1) {
    virtio_error(vdev, "virtio-blk missing headers");
    return -1;
}

if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                        sizeof(req->out)) != sizeof(req->out))) {
    virtio_error(vdev, "virtio-blk request outhdr too short");
    return -1;
}

iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                           &req->outhdr_undo);

if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
    virtio_error(vdev, "virtio-blk request inhdr too short");
    iov_discard_undo(&req->outhdr_undo);
    return -1;
}
...
req->in_len = iov_size(in_iov, in_num);
req->in = (void *)in_iov[in_num - 1].iov_base
          + in_iov[in_num - 1].iov_len
          - sizeof(struct virtio_blk_inhdr);
iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                          &req->inhdr_undo);

type = virtio_ldl_p(vdev, &req->out.type);
...
req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

if (is_write) {
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);
    trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                  req->qiov.size / BDRV_SECTOR_SIZE);
} else {
    qemu_iovec_init_external(&req->qiov, in_iov, in_num);
    trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                 req->qiov.size / BDRV_SECTOR_SIZE);
}

if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
    ...
    block_acct_invalid(blk_get_stats(s->blk),
                       is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
    ...
}

block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                 is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
...
if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                          is_write != mrb->is_write ||
                          !s->conf.request_merging)) {
    virtio_blk_submit_multireq(s, mrb);
}

assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
mrb->reqs[mrb->num_reqs++] = req;
mrb->is_write = is_write;
...
case VIRTIO_BLK_T_GET_ID:
{
    const char *serial = s->conf.serial ? s->conf.serial : "";
    ...
}
...
/* ... to access req->elem.out_sg directly because it may be ... */
...
iov_discard_undo(&req->inhdr_undo);
iov_discard_undo(&req->outhdr_undo);
virtio_error(vdev, "virtio-blk discard/write_zeroes header"
             " too short");
return -1;
...
/*
 * Give subclasses a chance to handle unknown requests. This way the
 * class lookup is not in the hot path.
 */
if (!vbk->handle_unknown_request ||
    !vbk->handle_unknown_request(req, mrb, type)) {
    virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
    ...
}
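
Every virtio-blk request arrives as one scatter-gather element: device-readable segments begin with a fixed 16-byte outhdr, device-writable segments end with the 1-byte inhdr status. A standalone sketch of pulling a header out of an iovec the way the iov_to_buf() call above does (simplified reimplementation, not QEMU's; assumes a little-endian host, since virtio headers are little-endian on the wire):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Same layout as struct virtio_blk_outhdr. */
struct outhdr {
    uint32_t type;
    uint32_t ioprio;
    uint64_t sector;
};

/* Copy `len` bytes scattered across an iovec into a flat buffer;
 * returns the number of bytes actually copied. */
static size_t iov_to_buf(const struct iovec *iov, unsigned num,
                         void *buf, size_t len)
{
    size_t done = 0;
    for (unsigned i = 0; i < num && done < len; i++) {
        size_t n = iov[i].iov_len;
        if (n > len - done) {
            n = len - done;
        }
        memcpy((char *)buf + done, iov[i].iov_base, n);
        done += n;
    }
    return done;
}

int main(void)
{
    /* The guest may split the header across segments. */
    uint8_t part1[6] = {1, 0, 0, 0, 0, 0};                /* type = 1 (OUT) */
    uint8_t part2[10] = {0, 0, 42, 0, 0, 0, 0, 0, 0, 0};  /* sector = 42 */
    struct iovec out_sg[] = {
        { .iov_base = part1, .iov_len = sizeof(part1) },
        { .iov_base = part2, .iov_len = sizeof(part2) },
    };
    struct outhdr hdr;

    if (iov_to_buf(out_sg, 2, &hdr, sizeof(hdr)) != sizeof(hdr)) {
        fprintf(stderr, "outhdr too short\n");
        return 1;
    }
    printf("type=%u sector=%llu\n", hdr.type, (unsigned long long)hdr.sector);
    return 0;
}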
/* in virtio_blk_handle_vq() */
virtqueue_detach_element(req->vq, &req->elem, 0);

/* in virtio_blk_handle_output() */
if (!s->ioeventfd_disabled && !s->ioeventfd_started) {
    ...
    /* ioeventfd is now handling requests, bail out */
    if (!s->ioeventfd_disabled) {
        return;
    }
}

/* in virtio_blk_dma_restart_bh() */
VirtIOBlock *s = req->dev; /* we're called with at least one request */
...
while (req) {
    VirtIOBlockReq *next = req->next;
    ...
    next = req->next;
    virtqueue_detach_element(req->vq, &req->elem, 0);
    ...
    req = next;
}
...
blk_dec_in_flight(s->conf.conf.blk);

/* in virtio_blk_dma_restart_cb() */
uint16_t num_queues = s->conf.num_queues;
...
/* Split the device-wide s->rq request list into per-vq request lists */
...
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
    rq = s->rq;
    s->rq = NULL;
}

while (rq) {
    VirtIOBlockReq *next = rq->next;
    uint16_t idx = virtio_get_queue_index(rq->vq);
    ...
    rq->next = vq_rq[idx];
    vq_rq[idx] = rq;
    rq = next;
}

/* Schedule a BH to submit the requests in each vq's AioContext */
for (uint16_t i = 0; i < num_queues; i++) {
    ...
    blk_inc_in_flight(s->conf.conf.blk);
    ...
    aio_bh_schedule_oneshot(s->vq_aio_context[i],
                            virtio_blk_dma_restart_bh, vq_rq[i]);
}
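
virtio_blk_dma_restart_cb() redistributes the device-wide s->rq list into per-virtqueue lists by head insertion. The same list surgery in miniature (struct and queue count are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for VirtIOBlockReq: a singly linked request node
 * remembering which virtqueue it came from. */
struct req {
    uint16_t queue_idx;
    struct req *next;
};

int main(void)
{
    /* Device-wide list of parked requests: queue indices 0 -> 1 -> 0. */
    struct req r3 = { 0, NULL }, r2 = { 1, &r3 }, r1 = { 0, &r2 };
    struct req *rq = &r1;

    enum { NUM_QUEUES = 2 };
    struct req *vq_rq[NUM_QUEUES] = { NULL };

    /* Split into per-queue lists by pushing each node onto its queue's
     * head; per-queue order ends up reversed, which is fine for resubmit. */
    while (rq) {
        struct req *next = rq->next;
        uint16_t idx = rq->queue_idx;
        rq->next = vq_rq[idx];
        vq_rq[idx] = rq;
        rq = next;
    }

    for (int i = 0; i < NUM_QUEUES; i++) {
        printf("queue %d:", i);
        for (struct req *r = vq_rq[i]; r; r = r->next) {
            printf(" req@%p", (void *)r);
        }
        printf("\n");
    }
    return 0;
}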
/* in virtio_blk_reset() */
assert(!s->ioeventfd_started);
...
blk_drain(s->blk);
...
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
    while (s->rq) {
        req = s->rq;
        s->rq = req->next;

        /* No other threads can access req->vq here */
        virtqueue_detach_element(req->vq, &req->elem, 0);
        ...
    }
}
...
blk_set_enable_write_cache(s->blk, s->original_wce);

/* coalesce internal state, copy to pci i/o region 0 */

/* in virtio_blk_update_config() */
BlockConf *conf = &s->conf.conf;
BlockDriverState *bs = blk_bs(s->blk);
...
int blk_size = conf->logical_block_size;
...
blk_get_geometry(s->blk, &capacity);
...
virtio_stl_p(vdev, &blkcfg.seg_max,
             s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
...
virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
blkcfg.geometry.heads = conf->heads;
...
/*
 * If the device capacity is not a multiple of the logical block size,
 * use sector_mask to adapt the geometry to present a correct picture:
 * the reported size is not the capacity divided by 512 - instead it is
 * the amount of blk_size blocks that fits in the device.
 */
length = blk_getlength(s->blk);
if (length > 0 && length / conf->heads / conf->secs % blk_size) {
    blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
} else {
    blkcfg.geometry.sectors = conf->secs;
}
...
blkcfg.wce = blk_enable_write_cache(s->blk);
virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
    uint32_t discard_granularity = conf->discard_granularity;
    if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
        discard_granularity = blk_size;
    }
    virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                 s->conf.max_discard_sectors);
    ...
    /*
     * Only one segment per request is supported, since there are no
     * widely used userspace APIs that allow applications to submit
     * multiple segments in a single call.
     */
    ...
}
if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
    virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                 s->conf.max_write_zeroes_sectors);
    ...
}
if (bs->bl.zoned != BLK_Z_NONE) {
    switch (bs->bl.zoned) {
        ...
    }
    virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
                 bs->bl.zone_size / 512);
    virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
                 bs->bl.max_active_zones);
    virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
                 bs->bl.max_open_zones);
    ...
    virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
                 bs->bl.max_append_sectors);
}
memcpy(config, &blkcfg, s->config_size);

/* in virtio_blk_set_config() */
memcpy(&blkcfg, config, s->config_size);

blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
/* in virtio_blk_get_features() */
/* Firstly sync all virtio-blk possible supported features */
features |= s->host_features;
...
if (blk_enable_write_cache(s->blk) ||
    (s->conf.x_enable_wce_if_config_wce &&
     virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
    virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
}
if (!blk_is_writable(s->blk)) {
    virtio_add_feature(&features, VIRTIO_BLK_F_RO);
}
if (s->conf.num_queues > 1) {
    virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
}
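
Feature negotiation is plain bit arithmetic on a 64-bit mask. A sketch using the virtio-blk feature bit numbers from the VIRTIO spec (RO=5, WCE=9, MQ=12); the helper names are invented, not QEMU's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_RO  5
#define F_WCE 9
#define F_MQ  12

static void add_feature(uint64_t *features, unsigned bit)
{
    *features |= 1ull << bit;
}

static bool has_feature(uint64_t features, unsigned bit)
{
    return features & (1ull << bit);
}

int main(void)
{
    uint64_t features = 0;
    bool writable = false;
    unsigned num_queues = 4;

    if (!writable) {
        add_feature(&features, F_RO);
    }
    if (num_queues > 1) {
        add_feature(&features, F_MQ);
    }
    printf("RO=%d WCE=%d MQ=%d\n",
           has_feature(features, F_RO),
           has_feature(features, F_WCE),
           has_feature(features, F_MQ));
    return 0;
}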
/* in virtio_blk_set_status() */
assert(!s->ioeventfd_started);
...
/* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
 * cache flushes. ... Leaving the "auto writethrough" behavior enabled
 * would break the following sequence:
 *
 *     Guest started with "-drive cache=writethrough"
 *     Guest sets status to 0
 *     ...
 *     Guest writes 1 to the WCE configuration field (writeback mode)
 *     ...
 *
 * s->blk would erroneously be placed in writethrough mode.
 */
blk_set_enable_write_cache(s->blk,
                           virtio_has_feature(vdev->guest_features,
                                              VIRTIO_BLK_F_WCE));

/* in virtio_blk_save_device() */
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
    VirtIOBlockReq *req = s->rq;

    while (req) {
        ...
        if (s->conf.num_queues > 1) {
            qemu_put_be32(f, virtio_get_queue_index(req->vq));
        }
        ...
        qemu_put_virtqueue_element(vdev, f, &req->elem);
        req = req->next;
    }
}

/* in virtio_blk_load_device() */
unsigned nvqs = s->conf.num_queues;
...
    return -EINVAL;
...
WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
    req->next = s->rq;
    s->rq = req;
}

/* in virtio_blk_resize() */
/*
 * virtio_notify_config() needs to acquire the BQL,
 * so it can't be called from an iothread. Schedule
 * it to be run in the main context BH.
 */

/* in virtio_blk_ioeventfd_detach() */
for (uint16_t i = 0; i < s->conf.num_queues; i++) {
    VirtQueue *vq = virtio_get_queue(vdev, i);
    virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
}

/* in virtio_blk_ioeventfd_attach() */
for (uint16_t i = 0; i < s->conf.num_queues; i++) {
    VirtQueue *vq = virtio_get_queue(vdev, i);
    virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
}

/* Suspend virtqueue ioeventfd processing during drain */

/* in virtio_blk_drained_begin() */
if (s->ioeventfd_started) {
    virtio_blk_ioeventfd_detach(s);
}

/* in virtio_blk_drained_end() */
if (s->ioeventfd_started) {
    virtio_blk_ioeventfd_attach(s);
}
/* in virtio_blk_vq_aio_context_init() */
VirtIOBlkConf *conf = &s->conf;
...
if (conf->iothread && conf->iothread_vq_mapping_list) {
    error_setg(errp,
               "iothread and iothread-vq-mapping properties cannot be set "
               "at the same time");
    return false;
}

if (conf->iothread || conf->iothread_vq_mapping_list) {
    if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
        ...
    }
}
...
s->vq_aio_context = g_new(AioContext *, conf->num_queues);

if (conf->iothread_vq_mapping_list) {
    if (!iothread_vq_mapping_apply(conf->iothread_vq_mapping_list,
                                   s->vq_aio_context,
                                   conf->num_queues,
                                   errp)) {
        g_free(s->vq_aio_context);
        s->vq_aio_context = NULL;
        return false;
    }
} else if (conf->iothread) {
    AioContext *ctx = iothread_get_aio_context(conf->iothread);
    for (unsigned i = 0; i < conf->num_queues; i++) {
        s->vq_aio_context[i] = ctx;
    }
    object_ref(OBJECT(conf->iothread));
} else {
    AioContext *ctx = qemu_get_aio_context();
    for (unsigned i = 0; i < conf->num_queues; i++) {
        s->vq_aio_context[i] = ctx;
    }
}

/* in virtio_blk_vq_aio_context_cleanup() */
VirtIOBlkConf *conf = &s->conf;

assert(!s->ioeventfd_started);

if (conf->iothread_vq_mapping_list) {
    iothread_vq_mapping_cleanup(conf->iothread_vq_mapping_list);
}

if (conf->iothread) {
    object_unref(OBJECT(conf->iothread));
}

g_free(s->vq_aio_context);
s->vq_aio_context = NULL;
/* in virtio_blk_start_ioeventfd() */
unsigned nvqs = s->conf.num_queues;
...
if (s->ioeventfd_started || s->ioeventfd_starting) {
    return 0;
}

s->ioeventfd_starting = true;
...
r = k->set_guest_notifiers(qbus->parent, nvqs, true);
if (r != 0) {
    error_report("virtio-blk failed to set guest notifier (%d), "
                 "ensure -accel kvm is set.", r);
    ...
}

/* Batch all the host notifiers in a single transaction to avoid
 * quadratic time complexity in address_space_update_ioeventfds(). */
...
fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
while (i--) {
    ...
}

/* The transaction expects the ioeventfds to be open when it
 * commits. ... */
while (j--) {
    ...
}
...
/*
 * Try to change the AioContext so that block jobs and other operations can
 * co-locate their activity in the same AioContext. If it fails, nevermind.
 */
assert(nvqs > 0); /* enforced during ->realize() */
r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
                        &local_err);
...
/*
 * These fields must be visible to the IOThread when it processes the
 * virtqueue, ...
 *
 * Make sure ->ioeventfd_started is false when blk_set_aio_context() is
 * called above so that draining does not cause the host notifier to be
 * detached/attached prematurely.
 */
s->ioeventfd_starting = false;
s->ioeventfd_started = true;
...
if (!blk_in_drain(s->conf.conf.blk)) {
    virtio_blk_ioeventfd_attach(s);
}
...
k->set_guest_notifiers(qbus->parent, nvqs, false);
...
s->ioeventfd_disabled = true;
s->ioeventfd_starting = false;
return -ENOSYS;
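
The host-notifier setup above uses a common rollback idiom: if resource i fails, the while (i--) loop tears down resources 0..i-1 in reverse order. Distilled into a runnable toy (all names invented):

#include <stdio.h>

enum { N = 4, FAIL_AT = 2 };   /* illustrative: resource 2 fails to set up */

static int setup(int i)  { return i == FAIL_AT ? -1 : 0; }
static void undo(int i)  { printf("undo %d\n", i); }

int main(void)
{
    int i;

    for (i = 0; i < N; i++) {
        if (setup(i) != 0) {
            fprintf(stderr, "setup %d failed\n", i);
            /* Tear down everything already set up, in reverse. */
            while (i--) {
                undo(i);
            }
            return 1;
        }
    }
    return 0;
}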
/* in virtio_blk_ioeventfd_stop_vq_bh() */
/* Test and clear the notifier after disabling the event, in case the handler
 * didn't have time to run. */

/* in virtio_blk_stop_ioeventfd() */
unsigned nvqs = s->conf.num_queues;

if (!s->ioeventfd_started || s->ioeventfd_stopping) {
    return;
}
...
if (s->ioeventfd_disabled) {
    s->ioeventfd_disabled = false;
    s->ioeventfd_started = false;
    return;
}

s->ioeventfd_stopping = true;

if (!blk_in_drain(s->conf.conf.blk)) {
    for (i = 0; i < nvqs; i++) {
        AioContext *ctx = s->vq_aio_context[i];
        ...
    }
}

/* Batch all the host notifiers in a single transaction to avoid
 * quadratic time complexity in address_space_update_ioeventfds(). */
...
/* The transaction expects the ioeventfds to be open when it
 * commits. ... */
...
/*
 * Set ->ioeventfd_started to false before draining so that host notifiers
 * are not detached/attached anymore.
 */
s->ioeventfd_started = false;

/* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
blk_drain(s->conf.conf.blk);

/*
 * Try to switch bs back to the QEMU main loop. If other users keep the
 * BlockBackend in the iothread, that's ok.
 */
blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);
...
k->set_guest_notifiers(qbus->parent, nvqs, false);
...
s->ioeventfd_stopping = false;
/* in virtio_blk_device_realize() */
VirtIOBlkConf *conf = &s->conf;
...
if (!conf->conf.blk) {
    ...
}
if (!blk_is_inserted(conf->conf.blk)) {
    ...
}
if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
    conf->num_queues = 1;
}
if (!conf->num_queues) {
    error_setg(errp, "num-queues property must be larger than 0");
    return;
}
if (conf->queue_size <= 2) {
    error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
               "must be > 2", conf->queue_size);
    return;
}
if (!is_power_of_2(conf->queue_size) ||
    conf->queue_size > VIRTQUEUE_MAX_SIZE) {
    error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
               "must be a power of 2 (max %d)",
               conf->queue_size, VIRTQUEUE_MAX_SIZE);
    return;
}

if (!blkconf_apply_backend_options(&conf->conf,
                                   !blk_supports_write_perm(conf->conf.blk),
                                   true, errp)) {
    return;
}
s->original_wce = blk_enable_write_cache(conf->conf.blk);
if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
    return;
}

if (!blkconf_blocksizes(&conf->conf, errp)) {
    return;
}

bs = blk_bs(conf->conf.blk);
if (bs->bl.zoned != BLK_Z_NONE) {
    virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
    if (bs->bl.zoned == BLK_Z_HM) {
        virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
    }
}

if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
    (!conf->max_discard_sectors ||
     conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
    error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
               ", must be between 1 and %d",
               conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
    return;
}

if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
    (!conf->max_write_zeroes_sectors ||
     conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
    error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
               "), must be between 1 and %d",
               conf->max_write_zeroes_sectors,
               (int)BDRV_REQUEST_MAX_SECTORS);
    return;
}

s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                        s->host_features);
virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);

qemu_mutex_init(&s->rq_lock);

s->blk = conf->conf.blk;
s->rq = NULL;
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

for (i = 0; i < conf->num_queues; i++) {
    virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
}
qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
...
s->ioeventfd_disabled = true;
...
for (i = 0; i < conf->num_queues; i++) {
    ...
}

/* ... called after ->start_ioeventfd() has already set blk's AioContext. */
s->change =
    qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);

blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
blk_set_dev_ops(s->blk, &virtio_block_ops, s);

blk_iostatus_enable(s->blk);

add_boot_device_lchs(dev, "/disk@0,0",
                     conf->conf.lcyls,
                     conf->conf.lheads,
                     conf->conf.lsecs);
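
realize() validates queue-size before creating the virtqueues: it must exceed 2 (seg-max is derived as queue_size - 2), be a power of two, and fit in VIRTQUEUE_MAX_SIZE. A compact sketch of those checks (VIRTQUEUE_MAX_SIZE is QEMU's compile-time maximum of 1024):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VIRTQUEUE_MAX_SIZE 1024u

static bool is_power_of_2(uint64_t v)
{
    return v && !(v & (v - 1));
}

static bool queue_size_valid(uint16_t queue_size)
{
    return queue_size > 2 &&
           is_power_of_2(queue_size) &&
           queue_size <= VIRTQUEUE_MAX_SIZE;
}

int main(void)
{
    printf("%d %d %d\n",
           queue_size_valid(256),   /* ok (the default) */
           queue_size_valid(2),     /* too small */
           queue_size_valid(100));  /* not a power of two */
    return 0;
}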
/* in virtio_blk_device_unrealize() */
VirtIOBlkConf *conf = &s->conf;
...
blk_drain(s->blk);
del_boot_device_lchs(dev, "/disk@0,0");
...
for (i = 0; i < conf->num_queues; i++) {
    virtio_del_queue(vdev, i);
}
qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
qemu_mutex_destroy(&s->rq_lock);
blk_ram_registrar_destroy(&s->blk_ram_registrar);
qemu_del_vm_change_state_handler(s->change);
blockdev_mark_auto_del(s->blk);

/* in virtio_blk_instance_init() */
device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                              "bootindex", "/disk@0,0",
                              DEVICE(obj));

static const VMStateDescription vmstate_virtio_blk = {
    .name = "virtio-blk",
    ...
};

static const Property virtio_blk_properties[] = {
    ...
    DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_CONFIG_WCE, true),
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
                       VIRTIO_BLK_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
    DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
    ...
    DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
                                         conf.iothread_vq_mapping_list),
    ...
    DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
                     conf.report_discard_granularity, true),
    DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_WRITE_ZEROES, true),
    DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
                       conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
                       conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
                     conf.x_enable_wce_if_config_wce, true),
    ...
};

/* in virtio_blk_class_init() */
dc->vmsd = &vmstate_virtio_blk;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
vdc->realize = virtio_blk_device_realize;
vdc->unrealize = virtio_blk_device_unrealize;
vdc->get_config = virtio_blk_update_config;
vdc->set_config = virtio_blk_set_config;
vdc->get_features = virtio_blk_get_features;
vdc->set_status = virtio_blk_set_status;
vdc->reset = virtio_blk_reset;
vdc->save = virtio_blk_save_device;
vdc->load = virtio_blk_load_device;
vdc->start_ioeventfd = virtio_blk_start_ioeventfd;
vdc->stop_ioeventfd = virtio_blk_stop_ioeventfd;