1 /* 2 * Virtio Block Device 3 * 4 * Copyright IBM, Corp. 2007 5 * 6 * Authors: 7 * Anthony Liguori <aliguori@us.ibm.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 */ 13 14 #include "qemu/osdep.h" 15 #include "qemu/defer-call.h" 16 #include "qapi/error.h" 17 #include "qemu/iov.h" 18 #include "qemu/module.h" 19 #include "qemu/error-report.h" 20 #include "qemu/main-loop.h" 21 #include "block/block_int.h" 22 #include "trace.h" 23 #include "hw/block/block.h" 24 #include "hw/qdev-properties.h" 25 #include "sysemu/blockdev.h" 26 #include "sysemu/block-ram-registrar.h" 27 #include "sysemu/sysemu.h" 28 #include "sysemu/runstate.h" 29 #include "hw/virtio/virtio-blk.h" 30 #include "dataplane/virtio-blk.h" 31 #include "scsi/constants.h" 32 #ifdef __linux__ 33 # include <scsi/sg.h> 34 #endif 35 #include "hw/virtio/virtio-bus.h" 36 #include "migration/qemu-file-types.h" 37 #include "hw/virtio/virtio-access.h" 38 #include "hw/virtio/virtio-blk-common.h" 39 #include "qemu/coroutine.h" 40 41 static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq, 42 VirtIOBlockReq *req) 43 { 44 req->dev = s; 45 req->vq = vq; 46 req->qiov.size = 0; 47 req->in_len = 0; 48 req->next = NULL; 49 req->mr_next = NULL; 50 } 51 52 static void virtio_blk_free_request(VirtIOBlockReq *req) 53 { 54 g_free(req); 55 } 56 57 static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) 58 { 59 VirtIOBlock *s = req->dev; 60 VirtIODevice *vdev = VIRTIO_DEVICE(s); 61 62 trace_virtio_blk_req_complete(vdev, req, status); 63 64 stb_p(&req->in->status, status); 65 iov_discard_undo(&req->inhdr_undo); 66 iov_discard_undo(&req->outhdr_undo); 67 virtqueue_push(req->vq, &req->elem, req->in_len); 68 if (s->dataplane_started && !s->dataplane_disabled) { 69 virtio_blk_data_plane_notify(s->dataplane, req->vq); 70 } else { 71 virtio_notify(vdev, req->vq); 72 } 73 } 74 75 static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, 76 bool is_read, bool acct_failed) 77 { 78 VirtIOBlock *s = req->dev; 79 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error); 80 81 if (action == BLOCK_ERROR_ACTION_STOP) { 82 /* Break the link as the next request is going to be parsed from the 83 * ring again. Otherwise we may end up doing a double completion! */ 84 req->mr_next = NULL; 85 req->next = s->rq; 86 s->rq = req; 87 } else if (action == BLOCK_ERROR_ACTION_REPORT) { 88 virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); 89 if (acct_failed) { 90 block_acct_failed(blk_get_stats(s->blk), &req->acct); 91 } 92 virtio_blk_free_request(req); 93 } 94 95 blk_error_action(s->blk, action, is_read, error); 96 return action != BLOCK_ERROR_ACTION_IGNORE; 97 } 98 99 static void virtio_blk_rw_complete(void *opaque, int ret) 100 { 101 VirtIOBlockReq *next = opaque; 102 VirtIOBlock *s = next->dev; 103 VirtIODevice *vdev = VIRTIO_DEVICE(s); 104 105 aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); 106 while (next) { 107 VirtIOBlockReq *req = next; 108 next = req->mr_next; 109 trace_virtio_blk_rw_complete(vdev, req, ret); 110 111 if (req->qiov.nalloc != -1) { 112 /* If nalloc is != -1 req->qiov is a local copy of the original 113 * external iovec. It was allocated in submit_requests to be 114 * able to merge requests. 
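             * The local copy is destroyed here once the merged request
             * has completed.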
*/ 115 qemu_iovec_destroy(&req->qiov); 116 } 117 118 if (ret) { 119 int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type); 120 bool is_read = !(p & VIRTIO_BLK_T_OUT); 121 /* Note that memory may be dirtied on read failure. If the 122 * virtio request is not completed here, as is the case for 123 * BLOCK_ERROR_ACTION_STOP, the memory may not be copied 124 * correctly during live migration. While this is ugly, 125 * it is acceptable because the device is free to write to 126 * the memory until the request is completed (which will 127 * happen on the other side of the migration). 128 */ 129 if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) { 130 continue; 131 } 132 } 133 134 virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); 135 block_acct_done(blk_get_stats(s->blk), &req->acct); 136 virtio_blk_free_request(req); 137 } 138 aio_context_release(blk_get_aio_context(s->conf.conf.blk)); 139 } 140 141 static void virtio_blk_flush_complete(void *opaque, int ret) 142 { 143 VirtIOBlockReq *req = opaque; 144 VirtIOBlock *s = req->dev; 145 146 aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); 147 if (ret) { 148 if (virtio_blk_handle_rw_error(req, -ret, 0, true)) { 149 goto out; 150 } 151 } 152 153 virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); 154 block_acct_done(blk_get_stats(s->blk), &req->acct); 155 virtio_blk_free_request(req); 156 157 out: 158 aio_context_release(blk_get_aio_context(s->conf.conf.blk)); 159 } 160 161 static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) 162 { 163 VirtIOBlockReq *req = opaque; 164 VirtIOBlock *s = req->dev; 165 bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) & 166 ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES; 167 168 aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); 169 if (ret) { 170 if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) { 171 goto out; 172 } 173 } 174 175 virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); 176 if (is_write_zeroes) { 177 block_acct_done(blk_get_stats(s->blk), &req->acct); 178 } 179 virtio_blk_free_request(req); 180 181 out: 182 aio_context_release(blk_get_aio_context(s->conf.conf.blk)); 183 } 184 185 #ifdef __linux__ 186 187 typedef struct { 188 VirtIOBlockReq *req; 189 struct sg_io_hdr hdr; 190 } VirtIOBlockIoctlReq; 191 192 static void virtio_blk_ioctl_complete(void *opaque, int status) 193 { 194 VirtIOBlockIoctlReq *ioctl_req = opaque; 195 VirtIOBlockReq *req = ioctl_req->req; 196 VirtIOBlock *s = req->dev; 197 VirtIODevice *vdev = VIRTIO_DEVICE(s); 198 struct virtio_scsi_inhdr *scsi; 199 struct sg_io_hdr *hdr; 200 201 scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base; 202 203 if (status) { 204 status = VIRTIO_BLK_S_UNSUPP; 205 virtio_stl_p(vdev, &scsi->errors, 255); 206 goto out; 207 } 208 209 hdr = &ioctl_req->hdr; 210 /* 211 * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi) 212 * clear the masked_status field [hence status gets cleared too, see 213 * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED 214 * status has occurred. However they do set DRIVER_SENSE in driver_status 215 * field. Also a (sb_len_wr > 0) indicates there is a sense buffer. 
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    g_free(ioctl_req);
}

#endif

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}

static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also require at least the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments,
         * there is a write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments, the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
330 */ 331 ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE; 332 } 333 334 ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base; 335 ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len; 336 337 acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr, 338 virtio_blk_ioctl_complete, ioctl_req); 339 if (!acb) { 340 g_free(ioctl_req); 341 status = VIRTIO_BLK_S_UNSUPP; 342 goto fail; 343 } 344 return -EINPROGRESS; 345 #else 346 abort(); 347 #endif 348 349 fail: 350 /* Just put anything nonzero so that the ioctl fails in the guest. */ 351 if (scsi) { 352 virtio_stl_p(vdev, &scsi->errors, 255); 353 } 354 return status; 355 } 356 357 static void virtio_blk_handle_scsi(VirtIOBlockReq *req) 358 { 359 int status; 360 361 status = virtio_blk_handle_scsi_req(req); 362 if (status != -EINPROGRESS) { 363 virtio_blk_req_complete(req, status); 364 virtio_blk_free_request(req); 365 } 366 } 367 368 static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb, 369 int start, int num_reqs, int niov) 370 { 371 BlockBackend *blk = s->blk; 372 QEMUIOVector *qiov = &mrb->reqs[start]->qiov; 373 int64_t sector_num = mrb->reqs[start]->sector_num; 374 bool is_write = mrb->is_write; 375 BdrvRequestFlags flags = 0; 376 377 if (num_reqs > 1) { 378 int i; 379 struct iovec *tmp_iov = qiov->iov; 380 int tmp_niov = qiov->niov; 381 382 /* mrb->reqs[start]->qiov was initialized from external so we can't 383 * modify it here. We need to initialize it locally and then add the 384 * external iovecs. */ 385 qemu_iovec_init(qiov, niov); 386 387 for (i = 0; i < tmp_niov; i++) { 388 qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len); 389 } 390 391 for (i = start + 1; i < start + num_reqs; i++) { 392 qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0, 393 mrb->reqs[i]->qiov.size); 394 mrb->reqs[i - 1]->mr_next = mrb->reqs[i]; 395 } 396 397 trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev), 398 mrb, start, num_reqs, 399 sector_num << BDRV_SECTOR_BITS, 400 qiov->size, is_write); 401 block_acct_merge_done(blk_get_stats(blk), 402 is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ, 403 num_reqs - 1); 404 } 405 406 if (blk_ram_registrar_ok(&s->blk_ram_registrar)) { 407 flags |= BDRV_REQ_REGISTERED_BUF; 408 } 409 410 if (is_write) { 411 blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 412 flags, virtio_blk_rw_complete, 413 mrb->reqs[start]); 414 } else { 415 blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 416 flags, virtio_blk_rw_complete, 417 mrb->reqs[start]); 418 } 419 } 420 421 static int multireq_compare(const void *a, const void *b) 422 { 423 const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a, 424 *req2 = *(VirtIOBlockReq **)b; 425 426 /* 427 * Note that we can't simply subtract sector_num1 from sector_num2 428 * here as that could overflow the return value. 
429 */ 430 if (req1->sector_num > req2->sector_num) { 431 return 1; 432 } else if (req1->sector_num < req2->sector_num) { 433 return -1; 434 } else { 435 return 0; 436 } 437 } 438 439 static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb) 440 { 441 int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0; 442 uint32_t max_transfer; 443 int64_t sector_num = 0; 444 445 if (mrb->num_reqs == 1) { 446 submit_requests(s, mrb, 0, 1, -1); 447 mrb->num_reqs = 0; 448 return; 449 } 450 451 max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk); 452 453 qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs), 454 &multireq_compare); 455 456 for (i = 0; i < mrb->num_reqs; i++) { 457 VirtIOBlockReq *req = mrb->reqs[i]; 458 if (num_reqs > 0) { 459 /* 460 * NOTE: We cannot merge the requests in below situations: 461 * 1. requests are not sequential 462 * 2. merge would exceed maximum number of IOVs 463 * 3. merge would exceed maximum transfer length of backend device 464 */ 465 if (sector_num + nb_sectors != req->sector_num || 466 niov > blk_get_max_iov(s->blk) - req->qiov.niov || 467 req->qiov.size > max_transfer || 468 nb_sectors > (max_transfer - 469 req->qiov.size) / BDRV_SECTOR_SIZE) { 470 submit_requests(s, mrb, start, num_reqs, niov); 471 num_reqs = 0; 472 } 473 } 474 475 if (num_reqs == 0) { 476 sector_num = req->sector_num; 477 nb_sectors = niov = 0; 478 start = i; 479 } 480 481 nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE; 482 niov += req->qiov.niov; 483 num_reqs++; 484 } 485 486 submit_requests(s, mrb, start, num_reqs, niov); 487 mrb->num_reqs = 0; 488 } 489 490 static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb) 491 { 492 VirtIOBlock *s = req->dev; 493 494 block_acct_start(blk_get_stats(s->blk), &req->acct, 0, 495 BLOCK_ACCT_FLUSH); 496 497 /* 498 * Make sure all outstanding writes are posted to the backing device. 499 */ 500 if (mrb->is_write && mrb->num_reqs > 0) { 501 virtio_blk_submit_multireq(s, mrb); 502 } 503 blk_aio_flush(s->blk, virtio_blk_flush_complete, req); 504 } 505 506 static bool virtio_blk_sect_range_ok(VirtIOBlock *dev, 507 uint64_t sector, size_t size) 508 { 509 uint64_t nb_sectors = size >> BDRV_SECTOR_BITS; 510 uint64_t total_sectors; 511 512 if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) { 513 return false; 514 } 515 if (sector & dev->sector_mask) { 516 return false; 517 } 518 if (size % dev->conf.conf.logical_block_size) { 519 return false; 520 } 521 blk_get_geometry(dev->blk, &total_sectors); 522 if (sector > total_sectors || nb_sectors > total_sectors - sector) { 523 return false; 524 } 525 return true; 526 } 527 528 static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req, 529 struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes) 530 { 531 VirtIOBlock *s = req->dev; 532 VirtIODevice *vdev = VIRTIO_DEVICE(s); 533 uint64_t sector; 534 uint32_t num_sectors, flags, max_sectors; 535 uint8_t err_status; 536 int bytes; 537 538 sector = virtio_ldq_p(vdev, &dwz_hdr->sector); 539 num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors); 540 flags = virtio_ldl_p(vdev, &dwz_hdr->flags); 541 max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors : 542 s->conf.max_discard_sectors; 543 544 /* 545 * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, this check 546 * make us sure that "num_sectors << BDRV_SECTOR_BITS" can fit in 547 * the integer variable. 
548 */ 549 if (unlikely(num_sectors > max_sectors)) { 550 err_status = VIRTIO_BLK_S_IOERR; 551 goto err; 552 } 553 554 bytes = num_sectors << BDRV_SECTOR_BITS; 555 556 if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) { 557 err_status = VIRTIO_BLK_S_IOERR; 558 goto err; 559 } 560 561 /* 562 * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard 563 * and write zeroes commands if any unknown flag is set. 564 */ 565 if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { 566 err_status = VIRTIO_BLK_S_UNSUPP; 567 goto err; 568 } 569 570 if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */ 571 int blk_aio_flags = 0; 572 573 if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { 574 blk_aio_flags |= BDRV_REQ_MAY_UNMAP; 575 } 576 577 block_acct_start(blk_get_stats(s->blk), &req->acct, bytes, 578 BLOCK_ACCT_WRITE); 579 580 blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS, 581 bytes, blk_aio_flags, 582 virtio_blk_discard_write_zeroes_complete, req); 583 } else { /* VIRTIO_BLK_T_DISCARD */ 584 /* 585 * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for 586 * discard commands if the unmap flag is set. 587 */ 588 if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) { 589 err_status = VIRTIO_BLK_S_UNSUPP; 590 goto err; 591 } 592 593 blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes, 594 virtio_blk_discard_write_zeroes_complete, req); 595 } 596 597 return VIRTIO_BLK_S_OK; 598 599 err: 600 if (is_write_zeroes) { 601 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE); 602 } 603 return err_status; 604 } 605 606 typedef struct ZoneCmdData { 607 VirtIOBlockReq *req; 608 struct iovec *in_iov; 609 unsigned in_num; 610 union { 611 struct { 612 unsigned int nr_zones; 613 BlockZoneDescriptor *zones; 614 } zone_report_data; 615 struct { 616 int64_t offset; 617 } zone_append_data; 618 }; 619 } ZoneCmdData; 620 621 /* 622 * check zoned_request: error checking before issuing requests. If all checks 623 * passed, return true. 624 * append: true if only zone append requests issued. 
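 * status: out parameter, set to a VIRTIO_BLK_S_* error code when a check
 * fails.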
625 */ 626 static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len, 627 bool append, uint8_t *status) { 628 BlockDriverState *bs = blk_bs(s->blk); 629 int index; 630 631 if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) { 632 *status = VIRTIO_BLK_S_UNSUPP; 633 return false; 634 } 635 636 if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS) 637 || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) { 638 *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; 639 return false; 640 } 641 642 if (append) { 643 if (bs->bl.write_granularity) { 644 if ((offset % bs->bl.write_granularity) != 0) { 645 *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP; 646 return false; 647 } 648 } 649 650 index = offset / bs->bl.zone_size; 651 if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) { 652 *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; 653 return false; 654 } 655 656 if (len / 512 > bs->bl.max_append_sectors) { 657 if (bs->bl.max_append_sectors == 0) { 658 *status = VIRTIO_BLK_S_UNSUPP; 659 } else { 660 *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; 661 } 662 return false; 663 } 664 } 665 return true; 666 } 667 668 static void virtio_blk_zone_report_complete(void *opaque, int ret) 669 { 670 ZoneCmdData *data = opaque; 671 VirtIOBlockReq *req = data->req; 672 VirtIOBlock *s = req->dev; 673 VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); 674 struct iovec *in_iov = data->in_iov; 675 unsigned in_num = data->in_num; 676 int64_t zrp_size, n, j = 0; 677 int64_t nz = data->zone_report_data.nr_zones; 678 int8_t err_status = VIRTIO_BLK_S_OK; 679 680 trace_virtio_blk_zone_report_complete(vdev, req, nz, ret); 681 if (ret) { 682 err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; 683 goto out; 684 } 685 686 struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) { 687 .nr_zones = cpu_to_le64(nz), 688 }; 689 zrp_size = sizeof(struct virtio_blk_zone_report) 690 + sizeof(struct virtio_blk_zone_descriptor) * nz; 691 n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr)); 692 if (n != sizeof(zrp_hdr)) { 693 virtio_error(vdev, "Driver provided input buffer that is too small!"); 694 err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; 695 goto out; 696 } 697 698 for (size_t i = sizeof(zrp_hdr); i < zrp_size; 699 i += sizeof(struct virtio_blk_zone_descriptor), ++j) { 700 struct virtio_blk_zone_descriptor desc = 701 (struct virtio_blk_zone_descriptor) { 702 .z_start = cpu_to_le64(data->zone_report_data.zones[j].start 703 >> BDRV_SECTOR_BITS), 704 .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap 705 >> BDRV_SECTOR_BITS), 706 .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp 707 >> BDRV_SECTOR_BITS), 708 }; 709 710 switch (data->zone_report_data.zones[j].type) { 711 case BLK_ZT_CONV: 712 desc.z_type = VIRTIO_BLK_ZT_CONV; 713 break; 714 case BLK_ZT_SWR: 715 desc.z_type = VIRTIO_BLK_ZT_SWR; 716 break; 717 case BLK_ZT_SWP: 718 desc.z_type = VIRTIO_BLK_ZT_SWP; 719 break; 720 default: 721 g_assert_not_reached(); 722 } 723 724 switch (data->zone_report_data.zones[j].state) { 725 case BLK_ZS_RDONLY: 726 desc.z_state = VIRTIO_BLK_ZS_RDONLY; 727 break; 728 case BLK_ZS_OFFLINE: 729 desc.z_state = VIRTIO_BLK_ZS_OFFLINE; 730 break; 731 case BLK_ZS_EMPTY: 732 desc.z_state = VIRTIO_BLK_ZS_EMPTY; 733 break; 734 case BLK_ZS_CLOSED: 735 desc.z_state = VIRTIO_BLK_ZS_CLOSED; 736 break; 737 case BLK_ZS_FULL: 738 desc.z_state = VIRTIO_BLK_ZS_FULL; 739 break; 740 case BLK_ZS_EOPEN: 741 desc.z_state = VIRTIO_BLK_ZS_EOPEN; 742 break; 743 case BLK_ZS_IOPEN: 744 desc.z_state = VIRTIO_BLK_ZS_IOPEN; 745 break; 746 case 
BLK_ZS_NOT_WP:
            desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
            break;
        default:
            g_assert_not_reached();
        }

        /* TODO: this loop has O(n^2) time complexity; it needs optimization. */
        n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
        if (n != sizeof(desc)) {
            virtio_error(vdev, "Driver provided input buffer "
                               "for descriptors that is too small!");
            err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        }
    }

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    g_free(data->zone_report_data.zones);
    g_free(data);
}

static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                          struct iovec *in_iov,
                                          unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        return;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}

static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;
    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity
- offset) { 854 /* The zoned device allows the last smaller zone. */ 855 len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1); 856 } else { 857 len = bs->bl.zone_size; 858 } 859 trace_virtio_blk_handle_zone_mgmt(vdev, req, op, 860 offset >> BDRV_SECTOR_BITS, 861 len >> BDRV_SECTOR_BITS); 862 } 863 864 if (!check_zoned_request(s, offset, len, false, &err_status)) { 865 goto out; 866 } 867 868 blk_aio_zone_mgmt(s->blk, op, offset, len, 869 virtio_blk_zone_mgmt_complete, req); 870 871 return 0; 872 out: 873 virtio_blk_req_complete(req, err_status); 874 virtio_blk_free_request(req); 875 return err_status; 876 } 877 878 static void virtio_blk_zone_append_complete(void *opaque, int ret) 879 { 880 ZoneCmdData *data = opaque; 881 VirtIOBlockReq *req = data->req; 882 VirtIOBlock *s = req->dev; 883 VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); 884 int64_t append_sector, n; 885 uint8_t err_status = VIRTIO_BLK_S_OK; 886 887 if (ret) { 888 err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; 889 goto out; 890 } 891 892 virtio_stq_p(vdev, &append_sector, 893 data->zone_append_data.offset >> BDRV_SECTOR_BITS); 894 n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector, 895 sizeof(append_sector)); 896 if (n != sizeof(append_sector)) { 897 virtio_error(vdev, "Driver provided input buffer less than size of " 898 "append_sector"); 899 err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; 900 goto out; 901 } 902 trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret); 903 904 out: 905 aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); 906 virtio_blk_req_complete(req, err_status); 907 virtio_blk_free_request(req); 908 aio_context_release(blk_get_aio_context(s->conf.conf.blk)); 909 g_free(data); 910 } 911 912 static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, 913 struct iovec *out_iov, 914 struct iovec *in_iov, 915 uint64_t out_num, 916 unsigned in_num) { 917 VirtIOBlock *s = req->dev; 918 VirtIODevice *vdev = VIRTIO_DEVICE(s); 919 uint8_t err_status = VIRTIO_BLK_S_OK; 920 921 int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; 922 int64_t len = iov_size(out_iov, out_num); 923 924 trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS); 925 if (!check_zoned_request(s, offset, len, true, &err_status)) { 926 goto out; 927 } 928 929 ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData)); 930 data->req = req; 931 data->in_iov = in_iov; 932 data->in_num = in_num; 933 data->zone_append_data.offset = offset; 934 qemu_iovec_init_external(&req->qiov, out_iov, out_num); 935 936 block_acct_start(blk_get_stats(s->blk), &req->acct, len, 937 BLOCK_ACCT_ZONE_APPEND); 938 939 blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0, 940 virtio_blk_zone_append_complete, data); 941 return 0; 942 943 out: 944 aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); 945 virtio_blk_req_complete(req, err_status); 946 virtio_blk_free_request(req); 947 aio_context_release(blk_get_aio_context(s->conf.conf.blk)); 948 return err_status; 949 } 950 951 static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) 952 { 953 uint32_t type; 954 struct iovec *in_iov = req->elem.in_sg; 955 struct iovec *out_iov = req->elem.out_sg; 956 unsigned in_num = req->elem.in_num; 957 unsigned out_num = req->elem.out_num; 958 VirtIOBlock *s = req->dev; 959 VirtIODevice *vdev = VIRTIO_DEVICE(s); 960 961 if (req->elem.out_num < 1 || req->elem.in_num < 1) { 962 virtio_error(vdev, "virtio-blk missing headers"); 963 return -1; 964 } 965 966 if 
(unlikely(iov_to_buf(out_iov, out_num, 0, &req->out, 967 sizeof(req->out)) != sizeof(req->out))) { 968 virtio_error(vdev, "virtio-blk request outhdr too short"); 969 return -1; 970 } 971 972 iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out), 973 &req->outhdr_undo); 974 975 if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) { 976 virtio_error(vdev, "virtio-blk request inhdr too short"); 977 iov_discard_undo(&req->outhdr_undo); 978 return -1; 979 } 980 981 /* We always touch the last byte, so just see how big in_iov is. */ 982 req->in_len = iov_size(in_iov, in_num); 983 req->in = (void *)in_iov[in_num - 1].iov_base 984 + in_iov[in_num - 1].iov_len 985 - sizeof(struct virtio_blk_inhdr); 986 iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr), 987 &req->inhdr_undo); 988 989 type = virtio_ldl_p(vdev, &req->out.type); 990 991 /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER 992 * is an optional flag. Although a guest should not send this flag if 993 * not negotiated we ignored it in the past. So keep ignoring it. */ 994 switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) { 995 case VIRTIO_BLK_T_IN: 996 { 997 bool is_write = type & VIRTIO_BLK_T_OUT; 998 req->sector_num = virtio_ldq_p(vdev, &req->out.sector); 999 1000 if (is_write) { 1001 qemu_iovec_init_external(&req->qiov, out_iov, out_num); 1002 trace_virtio_blk_handle_write(vdev, req, req->sector_num, 1003 req->qiov.size / BDRV_SECTOR_SIZE); 1004 } else { 1005 qemu_iovec_init_external(&req->qiov, in_iov, in_num); 1006 trace_virtio_blk_handle_read(vdev, req, req->sector_num, 1007 req->qiov.size / BDRV_SECTOR_SIZE); 1008 } 1009 1010 if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) { 1011 virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); 1012 block_acct_invalid(blk_get_stats(s->blk), 1013 is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); 1014 virtio_blk_free_request(req); 1015 return 0; 1016 } 1017 1018 block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size, 1019 is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); 1020 1021 /* merge would exceed maximum number of requests or IO direction 1022 * changes */ 1023 if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS || 1024 is_write != mrb->is_write || 1025 !s->conf.request_merging)) { 1026 virtio_blk_submit_multireq(s, mrb); 1027 } 1028 1029 assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS); 1030 mrb->reqs[mrb->num_reqs++] = req; 1031 mrb->is_write = is_write; 1032 break; 1033 } 1034 case VIRTIO_BLK_T_FLUSH: 1035 virtio_blk_handle_flush(req, mrb); 1036 break; 1037 case VIRTIO_BLK_T_ZONE_REPORT: 1038 virtio_blk_handle_zone_report(req, in_iov, in_num); 1039 break; 1040 case VIRTIO_BLK_T_ZONE_OPEN: 1041 virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN); 1042 break; 1043 case VIRTIO_BLK_T_ZONE_CLOSE: 1044 virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE); 1045 break; 1046 case VIRTIO_BLK_T_ZONE_FINISH: 1047 virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH); 1048 break; 1049 case VIRTIO_BLK_T_ZONE_RESET: 1050 virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); 1051 break; 1052 case VIRTIO_BLK_T_ZONE_RESET_ALL: 1053 virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); 1054 break; 1055 case VIRTIO_BLK_T_SCSI_CMD: 1056 virtio_blk_handle_scsi(req); 1057 break; 1058 case VIRTIO_BLK_T_GET_ID: 1059 { 1060 /* 1061 * NB: per existing s/n string convention the string is 1062 * terminated by '\0' only when shorter than buffer. 1063 */ 1064 const char *serial = s->conf.serial ? 
s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
        /*
         * Pass out_iov/out_num and in_iov/in_num here: it is not safe to
         * access req->elem.out_sg directly because it may be modified by
         * virtio_blk_handle_request().
         */
        virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
        break;
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * the VIRTIO_BLK_T_OUT flag set. That flag is masked in the switch
     * statement above, so it must be masked in these case labels as well;
     * whether it was actually set is checked below.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
         * more than one segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            virtio_blk_free_request(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            iov_discard_undo(&req->inhdr_undo);
            iov_discard_undo(&req->outhdr_undo);
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                         " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            virtio_blk_free_request(req);
        }

        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}

void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool suppress_notifications = virtio_queue_get_notification(vq);

    aio_context_acquire(blk_get_aio_context(s->blk));
    defer_call_begin();

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    defer_call_end();
    aio_context_release(blk_get_aio_context(s->blk));
}

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (s->dataplane && !s->dataplane_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * dataplane here instead of waiting for .set_status().
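         * If the dataplane could not be started (dataplane_disabled is set),
         * fall through and process the virtqueue in this context instead.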
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_disabled) {
            return;
        }
    }
    virtio_blk_handle_vq(s, vq);
}

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;

    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {};

    s->rq = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    /* Paired with inc in virtio_blk_dma_restart_cb() */
    blk_dec_in_flight(s->conf.conf.blk);

    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}

static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    /* Paired with dec in virtio_blk_dma_restart_bh() */
    blk_inc_in_flight(s->conf.conf.blk);

    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
                            virtio_blk_dma_restart_bh, s);
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    AioContext *ctx;
    VirtIOBlockReq *req;

    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself can
     * produce them. */
    while (s->rq) {
        req = s->rq;
        s->rq = req->next;
        virtqueue_detach_element(req->vq, &req->elem, 0);
        virtio_blk_free_request(req);
    }

    aio_context_release(ctx);

    assert(!s->dataplane_started);
    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    BlockDriverState *bs = blk_bs(s->blk);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;
    AioContext *ctx;

    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max,
                 s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
1290 * For those devices where the capacity is ok for the given geometry 1291 * we don't touch the sector value of the geometry, since some devices 1292 * (like s390 dasd) need a specific value. Here the capacity is already 1293 * cyls*heads*secs*blk_size and the sector value is not block size 1294 * divided by 512 - instead it is the amount of blk_size blocks 1295 * per track (cylinder). 1296 */ 1297 length = blk_getlength(s->blk); 1298 aio_context_release(ctx); 1299 if (length > 0 && length / conf->heads / conf->secs % blk_size) { 1300 blkcfg.geometry.sectors = conf->secs & ~s->sector_mask; 1301 } else { 1302 blkcfg.geometry.sectors = conf->secs; 1303 } 1304 blkcfg.size_max = 0; 1305 blkcfg.physical_block_exp = get_physical_block_exp(conf); 1306 blkcfg.alignment_offset = 0; 1307 blkcfg.wce = blk_enable_write_cache(s->blk); 1308 virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues); 1309 if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) { 1310 uint32_t discard_granularity = conf->discard_granularity; 1311 if (discard_granularity == -1 || !s->conf.report_discard_granularity) { 1312 discard_granularity = blk_size; 1313 } 1314 virtio_stl_p(vdev, &blkcfg.max_discard_sectors, 1315 s->conf.max_discard_sectors); 1316 virtio_stl_p(vdev, &blkcfg.discard_sector_alignment, 1317 discard_granularity >> BDRV_SECTOR_BITS); 1318 /* 1319 * We support only one segment per request since multiple segments 1320 * are not widely used and there are no userspace APIs that allow 1321 * applications to submit multiple segments in a single call. 1322 */ 1323 virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1); 1324 } 1325 if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) { 1326 virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors, 1327 s->conf.max_write_zeroes_sectors); 1328 blkcfg.write_zeroes_may_unmap = 1; 1329 virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1); 1330 } 1331 if (bs->bl.zoned != BLK_Z_NONE) { 1332 switch (bs->bl.zoned) { 1333 case BLK_Z_HM: 1334 blkcfg.zoned.model = VIRTIO_BLK_Z_HM; 1335 break; 1336 case BLK_Z_HA: 1337 blkcfg.zoned.model = VIRTIO_BLK_Z_HA; 1338 break; 1339 default: 1340 g_assert_not_reached(); 1341 } 1342 1343 virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors, 1344 bs->bl.zone_size / 512); 1345 virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones, 1346 bs->bl.max_active_zones); 1347 virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones, 1348 bs->bl.max_open_zones); 1349 virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size); 1350 virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors, 1351 bs->bl.max_append_sectors); 1352 } else { 1353 blkcfg.zoned.model = VIRTIO_BLK_Z_NONE; 1354 } 1355 memcpy(config, &blkcfg, s->config_size); 1356 } 1357 1358 static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) 1359 { 1360 VirtIOBlock *s = VIRTIO_BLK(vdev); 1361 struct virtio_blk_config blkcfg; 1362 1363 memcpy(&blkcfg, config, s->config_size); 1364 1365 aio_context_acquire(blk_get_aio_context(s->blk)); 1366 blk_set_enable_write_cache(s->blk, blkcfg.wce != 0); 1367 aio_context_release(blk_get_aio_context(s->blk)); 1368 } 1369 1370 static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, 1371 Error **errp) 1372 { 1373 VirtIOBlock *s = VIRTIO_BLK(vdev); 1374 1375 /* Firstly sync all virtio-blk possible supported features */ 1376 features |= s->host_features; 1377 1378 virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX); 1379 virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY); 1380 virtio_add_feature(&features, 
VIRTIO_BLK_F_TOPOLOGY); 1381 virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE); 1382 if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) { 1383 if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) { 1384 error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0"); 1385 return 0; 1386 } 1387 } else { 1388 virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT); 1389 virtio_add_feature(&features, VIRTIO_BLK_F_SCSI); 1390 } 1391 1392 if (blk_enable_write_cache(s->blk) || 1393 (s->conf.x_enable_wce_if_config_wce && 1394 virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) { 1395 virtio_add_feature(&features, VIRTIO_BLK_F_WCE); 1396 } 1397 if (!blk_is_writable(s->blk)) { 1398 virtio_add_feature(&features, VIRTIO_BLK_F_RO); 1399 } 1400 if (s->conf.num_queues > 1) { 1401 virtio_add_feature(&features, VIRTIO_BLK_F_MQ); 1402 } 1403 1404 return features; 1405 } 1406 1407 static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) 1408 { 1409 VirtIOBlock *s = VIRTIO_BLK(vdev); 1410 1411 if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) { 1412 assert(!s->dataplane_started); 1413 } 1414 1415 if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) { 1416 return; 1417 } 1418 1419 /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send 1420 * cache flushes. Thus, the "auto writethrough" behavior is never 1421 * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature. 1422 * Leaving it enabled would break the following sequence: 1423 * 1424 * Guest started with "-drive cache=writethrough" 1425 * Guest sets status to 0 1426 * Guest sets DRIVER bit in status field 1427 * Guest reads host features (WCE=0, CONFIG_WCE=1) 1428 * Guest writes guest features (WCE=0, CONFIG_WCE=1) 1429 * Guest writes 1 to the WCE configuration field (writeback mode) 1430 * Guest sets DRIVER_OK bit in status field 1431 * 1432 * s->blk would erroneously be placed in writethrough mode. 
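     * For that reason, when VIRTIO_BLK_F_CONFIG_WCE has not been negotiated,
     * the cache mode below is derived from the negotiated VIRTIO_BLK_F_WCE
     * bit instead.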
1433 */ 1434 if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) { 1435 aio_context_acquire(blk_get_aio_context(s->blk)); 1436 blk_set_enable_write_cache(s->blk, 1437 virtio_vdev_has_feature(vdev, 1438 VIRTIO_BLK_F_WCE)); 1439 aio_context_release(blk_get_aio_context(s->blk)); 1440 } 1441 } 1442 1443 static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f) 1444 { 1445 VirtIOBlock *s = VIRTIO_BLK(vdev); 1446 VirtIOBlockReq *req = s->rq; 1447 1448 while (req) { 1449 qemu_put_sbyte(f, 1); 1450 1451 if (s->conf.num_queues > 1) { 1452 qemu_put_be32(f, virtio_get_queue_index(req->vq)); 1453 } 1454 1455 qemu_put_virtqueue_element(vdev, f, &req->elem); 1456 req = req->next; 1457 } 1458 qemu_put_sbyte(f, 0); 1459 } 1460 1461 static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, 1462 int version_id) 1463 { 1464 VirtIOBlock *s = VIRTIO_BLK(vdev); 1465 1466 while (qemu_get_sbyte(f)) { 1467 unsigned nvqs = s->conf.num_queues; 1468 unsigned vq_idx = 0; 1469 VirtIOBlockReq *req; 1470 1471 if (nvqs > 1) { 1472 vq_idx = qemu_get_be32(f); 1473 1474 if (vq_idx >= nvqs) { 1475 error_report("Invalid virtqueue index in request list: %#x", 1476 vq_idx); 1477 return -EINVAL; 1478 } 1479 } 1480 1481 req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq)); 1482 virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req); 1483 req->next = s->rq; 1484 s->rq = req; 1485 } 1486 1487 return 0; 1488 } 1489 1490 static void virtio_resize_cb(void *opaque) 1491 { 1492 VirtIODevice *vdev = opaque; 1493 1494 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 1495 virtio_notify_config(vdev); 1496 } 1497 1498 static void virtio_blk_resize(void *opaque) 1499 { 1500 VirtIODevice *vdev = VIRTIO_DEVICE(opaque); 1501 1502 /* 1503 * virtio_notify_config() needs to acquire the global mutex, 1504 * so it can't be called from an iothread. Instead, schedule 1505 * it to be run in the main context BH. 
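     * virtio_resize_cb() asserts that it runs in the main loop AioContext
     * before notifying the guest of the config change.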
1506 */ 1507 aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev); 1508 } 1509 1510 /* Suspend virtqueue ioeventfd processing during drain */ 1511 static void virtio_blk_drained_begin(void *opaque) 1512 { 1513 VirtIOBlock *s = opaque; 1514 VirtIODevice *vdev = VIRTIO_DEVICE(opaque); 1515 AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); 1516 1517 if (!s->dataplane || !s->dataplane_started) { 1518 return; 1519 } 1520 1521 for (uint16_t i = 0; i < s->conf.num_queues; i++) { 1522 VirtQueue *vq = virtio_get_queue(vdev, i); 1523 virtio_queue_aio_detach_host_notifier(vq, ctx); 1524 } 1525 } 1526 1527 /* Resume virtqueue ioeventfd processing after drain */ 1528 static void virtio_blk_drained_end(void *opaque) 1529 { 1530 VirtIOBlock *s = opaque; 1531 VirtIODevice *vdev = VIRTIO_DEVICE(opaque); 1532 AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); 1533 1534 if (!s->dataplane || !s->dataplane_started) { 1535 return; 1536 } 1537 1538 for (uint16_t i = 0; i < s->conf.num_queues; i++) { 1539 VirtQueue *vq = virtio_get_queue(vdev, i); 1540 virtio_queue_aio_attach_host_notifier(vq, ctx); 1541 } 1542 } 1543 1544 static const BlockDevOps virtio_block_ops = { 1545 .resize_cb = virtio_blk_resize, 1546 .drained_begin = virtio_blk_drained_begin, 1547 .drained_end = virtio_blk_drained_end, 1548 }; 1549 1550 static void virtio_blk_device_realize(DeviceState *dev, Error **errp) 1551 { 1552 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 1553 VirtIOBlock *s = VIRTIO_BLK(dev); 1554 VirtIOBlkConf *conf = &s->conf; 1555 Error *err = NULL; 1556 unsigned i; 1557 1558 if (!conf->conf.blk) { 1559 error_setg(errp, "drive property not set"); 1560 return; 1561 } 1562 if (!blk_is_inserted(conf->conf.blk)) { 1563 error_setg(errp, "Device needs media, but drive is empty"); 1564 return; 1565 } 1566 if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) { 1567 conf->num_queues = 1; 1568 } 1569 if (!conf->num_queues) { 1570 error_setg(errp, "num-queues property must be larger than 0"); 1571 return; 1572 } 1573 if (conf->queue_size <= 2) { 1574 error_setg(errp, "invalid queue-size property (%" PRIu16 "), " 1575 "must be > 2", conf->queue_size); 1576 return; 1577 } 1578 if (!is_power_of_2(conf->queue_size) || 1579 conf->queue_size > VIRTQUEUE_MAX_SIZE) { 1580 error_setg(errp, "invalid queue-size property (%" PRIu16 "), " 1581 "must be a power of 2 (max %d)", 1582 conf->queue_size, VIRTQUEUE_MAX_SIZE); 1583 return; 1584 } 1585 1586 if (!blkconf_apply_backend_options(&conf->conf, 1587 !blk_supports_write_perm(conf->conf.blk), 1588 true, errp)) { 1589 return; 1590 } 1591 s->original_wce = blk_enable_write_cache(conf->conf.blk); 1592 if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) { 1593 return; 1594 } 1595 1596 if (!blkconf_blocksizes(&conf->conf, errp)) { 1597 return; 1598 } 1599 1600 BlockDriverState *bs = blk_bs(conf->conf.blk); 1601 if (bs->bl.zoned != BLK_Z_NONE) { 1602 virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED); 1603 if (bs->bl.zoned == BLK_Z_HM) { 1604 virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD); 1605 } 1606 } 1607 1608 if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) && 1609 (!conf->max_discard_sectors || 1610 conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) { 1611 error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")" 1612 ", must be between 1 and %d", 1613 conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS); 1614 return; 1615 } 1616 1617 if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) && 
1618 (!conf->max_write_zeroes_sectors || 1619 conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) { 1620 error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32 1621 "), must be between 1 and %d", 1622 conf->max_write_zeroes_sectors, 1623 (int)BDRV_REQUEST_MAX_SECTORS); 1624 return; 1625 } 1626 1627 s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params, 1628 s->host_features); 1629 virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size); 1630 1631 s->blk = conf->conf.blk; 1632 s->rq = NULL; 1633 s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1; 1634 1635 for (i = 0; i < conf->num_queues; i++) { 1636 virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output); 1637 } 1638 qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2); 1639 virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err); 1640 if (err != NULL) { 1641 error_propagate(errp, err); 1642 for (i = 0; i < conf->num_queues; i++) { 1643 virtio_del_queue(vdev, i); 1644 } 1645 virtio_cleanup(vdev); 1646 return; 1647 } 1648 1649 /* 1650 * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets 1651 * called after ->start_ioeventfd() has already set blk's AioContext. 1652 */ 1653 s->change = 1654 qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s); 1655 1656 blk_ram_registrar_init(&s->blk_ram_registrar, s->blk); 1657 blk_set_dev_ops(s->blk, &virtio_block_ops, s); 1658 1659 blk_iostatus_enable(s->blk); 1660 1661 add_boot_device_lchs(dev, "/disk@0,0", 1662 conf->conf.lcyls, 1663 conf->conf.lheads, 1664 conf->conf.lsecs); 1665 } 1666 1667 static void virtio_blk_device_unrealize(DeviceState *dev) 1668 { 1669 VirtIODevice *vdev = VIRTIO_DEVICE(dev); 1670 VirtIOBlock *s = VIRTIO_BLK(dev); 1671 VirtIOBlkConf *conf = &s->conf; 1672 unsigned i; 1673 1674 blk_drain(s->blk); 1675 del_boot_device_lchs(dev, "/disk@0,0"); 1676 virtio_blk_data_plane_destroy(s->dataplane); 1677 s->dataplane = NULL; 1678 for (i = 0; i < conf->num_queues; i++) { 1679 virtio_del_queue(vdev, i); 1680 } 1681 qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2); 1682 blk_ram_registrar_destroy(&s->blk_ram_registrar); 1683 qemu_del_vm_change_state_handler(s->change); 1684 blockdev_mark_auto_del(s->blk); 1685 virtio_cleanup(vdev); 1686 } 1687 1688 static void virtio_blk_instance_init(Object *obj) 1689 { 1690 VirtIOBlock *s = VIRTIO_BLK(obj); 1691 1692 device_add_bootindex_property(obj, &s->conf.conf.bootindex, 1693 "bootindex", "/disk@0,0", 1694 DEVICE(obj)); 1695 } 1696 1697 static const VMStateDescription vmstate_virtio_blk = { 1698 .name = "virtio-blk", 1699 .minimum_version_id = 2, 1700 .version_id = 2, 1701 .fields = (VMStateField[]) { 1702 VMSTATE_VIRTIO_DEVICE, 1703 VMSTATE_END_OF_LIST() 1704 }, 1705 }; 1706 1707 static Property virtio_blk_properties[] = { 1708 DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf), 1709 DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf), 1710 DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf), 1711 DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial), 1712 DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features, 1713 VIRTIO_BLK_F_CONFIG_WCE, true), 1714 #ifdef __linux__ 1715 DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features, 1716 VIRTIO_BLK_F_SCSI, false), 1717 #endif 1718 DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, 1719 true), 1720 DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1721 VIRTIO_BLK_AUTO_NUM_QUEUES), 1722 DEFINE_PROP_UINT16("queue-size", 
VirtIOBlock, conf.queue_size, 256), 1723 DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true), 1724 DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD, 1725 IOThread *), 1726 DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features, 1727 VIRTIO_BLK_F_DISCARD, true), 1728 DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock, 1729 conf.report_discard_granularity, true), 1730 DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features, 1731 VIRTIO_BLK_F_WRITE_ZEROES, true), 1732 DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock, 1733 conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS), 1734 DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock, 1735 conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS), 1736 DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock, 1737 conf.x_enable_wce_if_config_wce, true), 1738 DEFINE_PROP_END_OF_LIST(), 1739 }; 1740 1741 static void virtio_blk_class_init(ObjectClass *klass, void *data) 1742 { 1743 DeviceClass *dc = DEVICE_CLASS(klass); 1744 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); 1745 1746 device_class_set_props(dc, virtio_blk_properties); 1747 dc->vmsd = &vmstate_virtio_blk; 1748 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1749 vdc->realize = virtio_blk_device_realize; 1750 vdc->unrealize = virtio_blk_device_unrealize; 1751 vdc->get_config = virtio_blk_update_config; 1752 vdc->set_config = virtio_blk_set_config; 1753 vdc->get_features = virtio_blk_get_features; 1754 vdc->set_status = virtio_blk_set_status; 1755 vdc->reset = virtio_blk_reset; 1756 vdc->save = virtio_blk_save_device; 1757 vdc->load = virtio_blk_load_device; 1758 vdc->start_ioeventfd = virtio_blk_data_plane_start; 1759 vdc->stop_ioeventfd = virtio_blk_data_plane_stop; 1760 } 1761 1762 static const TypeInfo virtio_blk_info = { 1763 .name = TYPE_VIRTIO_BLK, 1764 .parent = TYPE_VIRTIO_DEVICE, 1765 .instance_size = sizeof(VirtIOBlock), 1766 .instance_init = virtio_blk_instance_init, 1767 .class_init = virtio_blk_class_init, 1768 }; 1769 1770 static void virtio_register_types(void) 1771 { 1772 type_register_static(&virtio_blk_info); 1773 } 1774 1775 type_init(virtio_register_types) 1776
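
/*
 * Example usage (illustrative only; the image path and node name below are
 * hypothetical):
 *
 *   qemu-system-x86_64 \
 *       -blockdev node-name=disk0,driver=qcow2,file.driver=file,file.filename=/path/to/disk.qcow2 \
 *       -device virtio-blk-pci,drive=disk0,num-queues=4,queue-size=256
 *
 * "num-queues", "queue-size", "discard", "write-zeroes" and the other device
 * options correspond to the properties declared in virtio_blk_properties[]
 * above; virtio-blk-pci is the PCI proxy that wraps TYPE_VIRTIO_BLK.
 */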