/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-ram-registrar.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "hw/virtio/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);

static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_free(req);
}

static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (qemu_in_iothread()) {
        virtio_notify_irqfd(vdev, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}

static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

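/*
 * The completion callbacks below run in the AioContext of the virtqueue that
 * submitted the request.  Requests parked on s->rq by
 * virtio_blk_handle_rw_error() under BLOCK_ERROR_ACTION_STOP are resubmitted
 * by virtio_blk_dma_restart_cb() once the VM resumes.
 */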
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure. If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration. While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, true)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    virtio_blk_free_request(req);
}

static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);
}

#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;

static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred. However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    g_free(ioctl_req);
}

#endif

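/*
 * virtqueue_pop() allocates the VirtIOBlockReq together with its embedded
 * VirtQueueElement, so the single g_free() in virtio_blk_free_request()
 * releases both.
 */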
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}

static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}

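/*
 * virtio_blk_handle_scsi_req() returns -EINPROGRESS when the SG_IO ioctl was
 * issued asynchronously; the request is then completed later in
 * virtio_blk_ioctl_complete().  Any other return value is a final status.
 */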
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}

static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    BlockBackend *blk = s->blk;
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;
    BdrvRequestFlags flags = 0;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from external so we can't
         * modify it here. We need to initialize it locally and then add the
         * external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                        flags, virtio_blk_rw_complete,
                        mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                       flags, virtio_blk_rw_complete,
                       mrb->reqs[start]);
    }
}

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

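/*
 * Merging example (illustrative): three queued 4 KiB writes at sectors 0, 8
 * and 16 are sorted by sector, found to be sequential, and submitted as one
 * 12 KiB blk_aio_pwritev() call, provided the merged request stays within
 * blk_get_max_iov() and blk_get_max_transfer() of the backend.
 */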
static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(s, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in below situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(s, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(s, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    VirtIOBlock *s = req->dev;

    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(s, mrb);
    }
    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}

static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, so this check
     * ensures that "num_sectors << BDRV_SECTOR_BITS" fits in
     * the integer variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}

typedef struct ZoneCmdData {
    VirtIOBlockReq *req;
    struct iovec *in_iov;
    unsigned in_num;
    union {
        struct {
            unsigned int nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report_data;
        struct {
            int64_t offset;
        } zone_append_data;
    };
} ZoneCmdData;

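/*
 * Zoned commands allocate a ZoneCmdData above to carry the request and the
 * guest's in iovecs to their completion callbacks; the union holds either
 * report- or append-specific state depending on the command in flight.
 */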
/*
 * check zoned_request: error checking before issuing requests. If all checks
 * passed, return true.
 * append: true if only zone append requests issued.
 */
static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
                                bool append, uint8_t *status)
{
    BlockDriverState *bs = blk_bs(s->blk);
    int index;

    if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
        *status = VIRTIO_BLK_S_UNSUPP;
        return false;
    }

    if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
        || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
        *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        return false;
    }

    if (append) {
        if (bs->bl.write_granularity) {
            if ((offset % bs->bl.write_granularity) != 0) {
                *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
                return false;
            }
        }

        index = offset / bs->bl.zone_size;
        if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
            *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            return false;
        }

        if (len / 512 > bs->bl.max_append_sectors) {
            if (bs->bl.max_append_sectors == 0) {
                *status = VIRTIO_BLK_S_UNSUPP;
            } else {
                *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            }
            return false;
        }
    }
    return true;
}

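/*
 * The zone report written back to the guest is a struct
 * virtio_blk_zone_report header followed by nr_zones
 * virtio_blk_zone_descriptor entries; iov_from_buf() places each piece at
 * its byte offset within the guest's in iovecs.
 */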
static void virtio_blk_zone_report_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct iovec *in_iov = data->in_iov;
    unsigned in_num = data->in_num;
    int64_t zrp_size, n, j = 0;
    int64_t nz = data->zone_report_data.nr_zones;
    int8_t err_status = VIRTIO_BLK_S_OK;
    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
        .nr_zones = cpu_to_le64(nz),
    };

    trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    zrp_size = sizeof(struct virtio_blk_zone_report)
               + sizeof(struct virtio_blk_zone_descriptor) * nz;
    n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
    if (n != sizeof(zrp_hdr)) {
        virtio_error(vdev, "Driver provided input buffer that is too small!");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    for (size_t i = sizeof(zrp_hdr); i < zrp_size;
         i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
        struct virtio_blk_zone_descriptor desc =
            (struct virtio_blk_zone_descriptor) {
                .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
                                       >> BDRV_SECTOR_BITS),
                .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
                                     >> BDRV_SECTOR_BITS),
                .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
                                    >> BDRV_SECTOR_BITS),
            };

        switch (data->zone_report_data.zones[j].type) {
        case BLK_ZT_CONV:
            desc.z_type = VIRTIO_BLK_ZT_CONV;
            break;
        case BLK_ZT_SWR:
            desc.z_type = VIRTIO_BLK_ZT_SWR;
            break;
        case BLK_ZT_SWP:
            desc.z_type = VIRTIO_BLK_ZT_SWP;
            break;
        default:
            g_assert_not_reached();
        }

        switch (data->zone_report_data.zones[j].state) {
        case BLK_ZS_RDONLY:
            desc.z_state = VIRTIO_BLK_ZS_RDONLY;
            break;
        case BLK_ZS_OFFLINE:
            desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
            break;
        case BLK_ZS_EMPTY:
            desc.z_state = VIRTIO_BLK_ZS_EMPTY;
            break;
        case BLK_ZS_CLOSED:
            desc.z_state = VIRTIO_BLK_ZS_CLOSED;
            break;
        case BLK_ZS_FULL:
            desc.z_state = VIRTIO_BLK_ZS_FULL;
            break;
        case BLK_ZS_EOPEN:
            desc.z_state = VIRTIO_BLK_ZS_EOPEN;
            break;
        case BLK_ZS_IOPEN:
            desc.z_state = VIRTIO_BLK_ZS_IOPEN;
            break;
        case BLK_ZS_NOT_WP:
            desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
            break;
        default:
            g_assert_not_reached();
        }

        /* TODO: it takes O(n^2) time complexity. Optimizations required. */
        n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
        if (n != sizeof(desc)) {
            virtio_error(vdev, "Driver provided input buffer "
                               "for descriptors that is too small!");
            err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        }
    }

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data->zone_report_data.zones);
    g_free(data);
}

static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                          struct iovec *in_iov,
                                          unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        return;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}

static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;
    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}

static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity - offset) {
            /* The zoned device allows the last smaller zone. */
            len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
        } else {
            len = bs->bl.zone_size;
        }
        trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
                                          offset >> BDRV_SECTOR_BITS,
                                          len >> BDRV_SECTOR_BITS);
    }

    if (!check_zoned_request(s, offset, len, false, &err_status)) {
        goto out;
    }

    blk_aio_zone_mgmt(s->blk, op, offset, len,
                      virtio_blk_zone_mgmt_complete, req);

    return 0;
out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}

static void virtio_blk_zone_append_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    int64_t append_sector, n;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    virtio_stq_p(vdev, &append_sector,
                 data->zone_append_data.offset >> BDRV_SECTOR_BITS);
    n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                     sizeof(append_sector));
    if (n != sizeof(append_sector)) {
        virtio_error(vdev, "Driver provided input buffer less than size of "
                           "append_sector");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }
    trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data);
}

static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
                                         struct iovec *out_iov,
                                         struct iovec *in_iov,
                                         uint64_t out_num,
                                         unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint8_t err_status = VIRTIO_BLK_S_OK;

    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    int64_t len = iov_size(out_iov, out_num);
    ZoneCmdData *data;

    trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
    if (!check_zoned_request(s, offset, len, true, &err_status)) {
        goto out;
    }

    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_append_data.offset = offset;
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);

    block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                     BLOCK_ACCT_ZONE_APPEND);

    blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                        virtio_blk_zone_append_complete, data);
    return 0;

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}

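/*
 * Request layout: the out iovecs start with a struct virtio_blk_outhdr and
 * the in iovecs end with a one-byte-status struct virtio_blk_inhdr.  The
 * header iovecs are stripped with iov_discard_*_undoable() so the remaining
 * iovecs describe only payload; error paths restore them with
 * iov_discard_undo() before the element is pushed back.
 */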
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                               &req->outhdr_undo);

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        iov_discard_undo(&req->outhdr_undo);
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                              &req->inhdr_undo);

    type = virtio_ldl_p(vdev, &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if
     * not negotiated we ignored it in the past. So keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(s->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return 0;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !s->conf.request_merging)) {
            virtio_blk_submit_multireq(s, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_ZONE_REPORT:
        virtio_blk_handle_zone_report(req, in_iov, in_num);
        break;
    case VIRTIO_BLK_T_ZONE_OPEN:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
        break;
    case VIRTIO_BLK_T_ZONE_CLOSE:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
        break;
    case VIRTIO_BLK_T_ZONE_FINISH:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
        break;
    case VIRTIO_BLK_T_ZONE_RESET:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_ZONE_RESET_ALL:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
        /*
         * Pass out_iov/out_num and in_iov/in_num rather than
         * req->elem.out_sg: the element's iovecs are not safe to access
         * directly because they may have been modified by the header
         * stripping above.
         */
        virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
        break;
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement,
     * so we must mask it for these requests, then we will check if it is set.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
         * more than one segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            virtio_blk_free_request(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            iov_discard_undo(&req->inhdr_undo);
            iov_discard_undo(&req->outhdr_undo);
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                               " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            virtio_blk_free_request(req);
        }

        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}

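/*
 * virtio_queue_set_notification(vq, 0) suppresses guest->host notifications
 * while the ring is drained, avoiding redundant kicks; the outer loop
 * re-checks virtio_queue_empty() after re-enabling notifications to close
 * the race with a request the guest queued in between.
 */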
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool suppress_notifications = virtio_queue_get_notification(vq);

    defer_call_begin();

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    defer_call_end();
}

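/*
 * The first kick after reset may arrive through this BQL handler before
 * ioeventfd is running; once virtio_device_start_ioeventfd() succeeds,
 * subsequent kicks are serviced in the IOThread and this function returns
 * early.
 */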
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (!s->ioeventfd_disabled && !s->ioeventfd_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * ioeventfd here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->ioeventfd_disabled) {
            return;
        }
    }

    virtio_blk_handle_vq(s, vq);
}

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev; /* we're called with at least one request */

    MultiReqBuffer mrb = {};

    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    /* Paired with inc in virtio_blk_dma_restart_cb() */
    blk_dec_in_flight(s->conf.conf.blk);
}

static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;
    uint16_t num_queues = s->conf.num_queues;
    g_autofree VirtIOBlockReq **vq_rq = NULL;
    VirtIOBlockReq *rq;

    if (!running) {
        return;
    }

    /* Split the device-wide s->rq request list into per-vq request lists */
    vq_rq = g_new0(VirtIOBlockReq *, num_queues);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        rq = s->rq;
        s->rq = NULL;
    }

    while (rq) {
        VirtIOBlockReq *next = rq->next;
        uint16_t idx = virtio_get_queue_index(rq->vq);

        /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
        assert(idx < num_queues);
        rq->next = vq_rq[idx];
        vq_rq[idx] = rq;
        rq = next;
    }

    /* Schedule a BH to submit the requests in each vq's AioContext */
    for (uint16_t i = 0; i < num_queues; i++) {
        if (!vq_rq[i]) {
            continue;
        }

        /* Paired with dec in virtio_blk_dma_restart_bh() */
        blk_inc_in_flight(s->conf.conf.blk);

        aio_bh_schedule_oneshot(s->vq_aio_context[i],
                                virtio_blk_dma_restart_bh,
                                vq_rq[i]);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req;

    /* Dataplane has stopped... */
    assert(!s->ioeventfd_started);

    /* ...but requests may still be in flight. */
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself
     * can produce them.
     */
    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        while (s->rq) {
            req = s->rq;
            s->rq = req->next;

            /* No other threads can access req->vq here */
            virtqueue_detach_element(req->vq, &req->elem, 0);

            virtio_blk_free_request(req);
        }
    }

    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* coalesce internal state, copy to pci i/o region 0
 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    BlockDriverState *bs = blk_bs(s->blk);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max,
                 s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        uint32_t discard_granularity = conf->discard_granularity;
        if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
            discard_granularity = blk_size;
        }
        virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                     s->conf.max_discard_sectors);
        virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
                     discard_granularity >> BDRV_SECTOR_BITS);
        /*
         * We support only one segment per request since multiple segments
         * are not widely used and there are no userspace APIs that allow
         * applications to submit multiple segments in a single call.
         */
        virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    }
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                     s->conf.max_write_zeroes_sectors);
        blkcfg.write_zeroes_may_unmap = 1;
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    }
    if (bs->bl.zoned != BLK_Z_NONE) {
        switch (bs->bl.zoned) {
        case BLK_Z_HM:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
            break;
        case BLK_Z_HA:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
            break;
        default:
            g_assert_not_reached();
        }

        virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
                     bs->bl.zone_size / 512);
        virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
                     bs->bl.max_active_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
                     bs->bl.max_open_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
        virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
                     bs->bl.max_append_sectors);
    } else {
        blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
    }
    memcpy(config, &blkcfg, s->config_size);
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, s->config_size);

    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
}

static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    /* Firstly sync all virtio-blk possible supported features */
    features |= s->host_features;

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (blk_enable_write_cache(s->blk) ||
        (s->conf.x_enable_wce_if_config_wce &&
         virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (!blk_is_writable(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}

static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->ioeventfd_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes. Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
    }
}

static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        VirtIOBlockReq *req = s->rq;

        while (req) {
            qemu_put_sbyte(f, 1);

            if (s->conf.num_queues > 1) {
                qemu_put_be32(f, virtio_get_queue_index(req->vq));
            }

            qemu_put_virtqueue_element(vdev, f, &req->elem);
            req = req->next;
        }
    }

    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    }

    return 0;
}

static void virtio_resize_cb(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    virtio_notify_config(vdev);
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    /*
     * virtio_notify_config() needs to acquire the BQL,
     * so it can't be called from an iothread. Instead, schedule
     * it to be run in the main context BH.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
}

static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_blk_drained_begin(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_detach(s);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_blk_drained_end(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_attach(s);
    }
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
    .drained_begin = virtio_blk_drained_begin,
    .drained_end = virtio_blk_drained_end,
};

static bool
validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
                                  uint16_t num_queues, Error **errp)
{
    g_autofree unsigned long *vqs = bitmap_new(num_queues);
    g_autoptr(GHashTable) iothreads =
        g_hash_table_new(g_str_hash, g_str_equal);

    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
        const char *name = node->value->iothread;
        uint16List *vq;

        if (!iothread_by_id(name)) {
            error_setg(errp, "IOThread \"%s\" object does not exist", name);
            return false;
        }

        if (!g_hash_table_add(iothreads, (gpointer)name)) {
            error_setg(errp,
                       "duplicate IOThread name \"%s\" in iothread-vq-mapping",
                       name);
            return false;
        }

        if (node != list) {
            if (!!node->value->vqs != !!list->value->vqs) {
                error_setg(errp, "either all items in iothread-vq-mapping "
                                 "must have vqs or none of them must have it");
                return false;
            }
        }

        for (vq = node->value->vqs; vq; vq = vq->next) {
            if (vq->value >= num_queues) {
                error_setg(errp, "vq index %u for IOThread \"%s\" must be "
                           "less than num_queues %u in iothread-vq-mapping",
                           vq->value, name, num_queues);
                return false;
            }

            if (test_and_set_bit(vq->value, vqs)) {
                error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
                           "because it is already assigned", vq->value, name);
                return false;
            }
        }
    }

    if (list->value->vqs) {
        for (uint16_t i = 0; i < num_queues; i++) {
            if (!test_bit(i, vqs)) {
                error_setg(errp,
                           "missing vq %u IOThread assignment in iothread-vq-mapping",
                           i);
                return false;
            }
        }
    }

    return true;
}

/**
 * apply_iothread_vq_mapping:
 * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
 * @vq_aio_context: The array of AioContext pointers to fill in.
 * @num_queues: The length of @vq_aio_context.
 * @errp: If an error occurs, a pointer to the area to store the error.
 *
 * Fill in the AioContext for each virtqueue in the @vq_aio_context array
 * given the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
 *
 * Returns: %true on success, %false on failure.
 **/
static bool apply_iothread_vq_mapping(
        IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
        AioContext **vq_aio_context,
        uint16_t num_queues,
        Error **errp)
{
    IOThreadVirtQueueMappingList *node;
    size_t num_iothreads = 0;
    size_t cur_iothread = 0;

    if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
                                           num_queues, errp)) {
        return false;
    }

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        num_iothreads++;
    }

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        IOThread *iothread = iothread_by_id(node->value->iothread);
        AioContext *ctx = iothread_get_aio_context(iothread);

        /* Released in virtio_blk_vq_aio_context_cleanup() */
        object_ref(OBJECT(iothread));

        if (node->value->vqs) {
            uint16List *vq;

            /* Explicit vq:IOThread assignment */
            for (vq = node->value->vqs; vq; vq = vq->next) {
                assert(vq->value < num_queues);
                vq_aio_context[vq->value] = ctx;
            }
        } else {
            /* Round-robin vq:IOThread assignment */
            for (unsigned i = cur_iothread; i < num_queues;
                 i += num_iothreads) {
                vq_aio_context[i] = ctx;
            }
        }

        cur_iothread++;
    }

    return true;
}

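/*
 * Round-robin example: with num_queues=4 and two IOThreads A and B that
 * specify no explicit vqs, A (cur_iothread 0) is assigned vqs 0 and 2 while
 * B (cur_iothread 1) is assigned vqs 1 and 3.
 */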
/* Context: BQL held */
static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOBlkConf *conf = &s->conf;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (conf->iothread && conf->iothread_vq_mapping_list) {
        error_setg(errp,
                   "iothread and iothread-vq-mapping properties cannot be set "
                   "at the same time");
        return false;
    }

    if (conf->iothread || conf->iothread_vq_mapping_list) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }

        /*
         * If ioeventfd is (re-)enabled while the guest is running there could
         * be block jobs that can conflict.
         */
        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            error_prepend(errp, "cannot start virtio-blk ioeventfd: ");
            return false;
        }
    }

    s->vq_aio_context = g_new(AioContext *, conf->num_queues);

    if (conf->iothread_vq_mapping_list) {
        if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
                                       s->vq_aio_context,
                                       conf->num_queues,
                                       errp)) {
            g_free(s->vq_aio_context);
            s->vq_aio_context = NULL;
            return false;
        }
    } else if (conf->iothread) {
        AioContext *ctx = iothread_get_aio_context(conf->iothread);
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }

        /* Released in virtio_blk_vq_aio_context_cleanup() */
        object_ref(OBJECT(conf->iothread));
    } else {
        AioContext *ctx = qemu_get_aio_context();
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }
    }

    return true;
}

/* Context: BQL held */
static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
{
    VirtIOBlkConf *conf = &s->conf;

    assert(!s->ioeventfd_started);

    if (conf->iothread_vq_mapping_list) {
        IOThreadVirtQueueMappingList *node;

        for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
            IOThread *iothread = iothread_by_id(node->value->iothread);
            object_unref(OBJECT(iothread));
        }
    }

    if (conf->iothread) {
        object_unref(OBJECT(conf->iothread));
    }

    g_free(s->vq_aio_context);
    s->vq_aio_context = NULL;
}

/* Context: BQL held */
static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;
    Error *local_err = NULL;
    int r;

    if (s->ioeventfd_started || s->ioeventfd_starting) {
        return 0;
    }

    s->ioeventfd_starting = true;

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    /* Set up virtqueue notify */
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
            int j = i;

            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
            }

            /*
             * The transaction expects the ioeventfds to be open when it
             * commits. Do it now, before the cleanup loop.
             */
            memory_region_transaction_commit();

            while (j--) {
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
            }
            goto fail_host_notifiers;
        }
    }

    memory_region_transaction_commit();

    /*
     * Try to change the AioContext so that block jobs and other operations
     * can co-locate their activity in the same AioContext. If it fails,
     * nevermind.
     */
    assert(nvqs > 0); /* enforced during ->realize() */
    r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
                            &local_err);
    if (r < 0) {
        warn_report_err(local_err);
    }

    /*
     * These fields must be visible to the IOThread when it processes the
     * virtqueue, otherwise it will think ioeventfd has not started yet.
     *
     * Make sure ->ioeventfd_started is false when blk_set_aio_context() is
     * called above so that draining does not cause the host notifier to be
     * detached/attached prematurely.
     */
    s->ioeventfd_starting = false;
    s->ioeventfd_started = true;
    smp_wmb(); /* paired with aio_notify_accept() on the read side */

    /*
     * Get this show started by hooking up our callbacks. If drained now,
     * virtio_blk_drained_end() will do this later.
     * Attaching the notifier also kicks the virtqueues, processing any
     * requests they may already have.
     */
    if (!blk_in_drain(s->conf.conf.blk)) {
        virtio_blk_ioeventfd_attach(s);
    }
    return 0;

fail_host_notifiers:
    k->set_guest_notifiers(qbus->parent, nvqs, false);
fail_guest_notifiers:
    s->ioeventfd_disabled = true;
    s->ioeventfd_starting = false;
    return -ENOSYS;
}

/* Stop notifications for new requests from guest.
 *
 * Context: BH in IOThread
 */
static void virtio_blk_ioeventfd_stop_vq_bh(void *opaque)
{
    VirtQueue *vq = opaque;
    EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);

    virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());

    /*
     * Test and clear notifier after disabling event, in case poll callback
     * didn't have time to run.
     */
    virtio_queue_host_notifier_read(host_notifier);
}

/* Context: BQL held */
static void virtio_blk_stop_ioeventfd(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = qdev_get_parent_bus(DEVICE(s));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;

    if (!s->ioeventfd_started || s->ioeventfd_stopping) {
        return;
    }

    /* Better luck next time. */
    if (s->ioeventfd_disabled) {
        s->ioeventfd_disabled = false;
        s->ioeventfd_started = false;
        return;
    }
    s->ioeventfd_stopping = true;

    if (!blk_in_drain(s->conf.conf.blk)) {
        for (i = 0; i < nvqs; i++) {
            VirtQueue *vq = virtio_get_queue(vdev, i);
            AioContext *ctx = s->vq_aio_context[i];

            aio_wait_bh_oneshot(ctx, virtio_blk_ioeventfd_stop_vq_bh, vq);
        }
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    /*
     * Set ->ioeventfd_started to false before draining so that host
     * notifiers are not detached/attached anymore.
     */
    s->ioeventfd_started = false;

    /* Wait for virtio_blk_dma_restart_bh() and in-flight I/O to complete */
    blk_drain(s->conf.conf.blk);

    /*
     * Try to switch bs back to the QEMU main loop. If other users keep the
     * BlockBackend in the IOThread, that's OK.
     */
    blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    s->ioeventfd_stopping = false;
}
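
/*
 * virtio_blk_device_realize() below validates the user-visible properties
 * before bringing the device up. In brief (values derived from the checks
 * in the code):
 *
 *   num-queues   0 is rejected; VIRTIO_BLK_AUTO_NUM_QUEUES falls back to 1
 *                here if the transport has not already picked a value.
 *   queue-size   must be a power of two, greater than 2 and at most
 *                VIRTQUEUE_MAX_SIZE, i.e. 4, 8, ... up to the maximum
 *                (the property default is 256, see virtio_blk_properties[]).
 */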
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    BlockDriverState *bs;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
        conf->num_queues = 1;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }
    if (conf->queue_size <= 2) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be > 2", conf->queue_size);
        return;
    }
    if (!is_power_of_2(conf->queue_size) ||
        conf->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be a power of 2 (max %d)",
                   conf->queue_size, VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!blkconf_apply_backend_options(&conf->conf,
                                       !blk_supports_write_perm(conf->conf.blk),
                                       true, errp)) {
        return;
    }
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }

    if (!blkconf_blocksizes(&conf->conf, errp)) {
        return;
    }

    bs = blk_bs(conf->conf.blk);
    if (bs->bl.zoned != BLK_Z_NONE) {
        virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
        if (bs->bl.zoned == BLK_Z_HM) {
            virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
        }
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
        (!conf->max_discard_sectors ||
         conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
                   ", must be between 1 and %d",
                   conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
        (!conf->max_write_zeroes_sectors ||
         conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
                   "), must be between 1 and %d",
                   conf->max_write_zeroes_sectors,
                   (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                            s->host_features);
    virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);

    qemu_mutex_init(&s->rq_lock);

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
    }
    qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);

    /* Don't start ioeventfd if transport does not support notifiers. */
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        s->ioeventfd_disabled = true;
    }

    virtio_blk_vq_aio_context_init(s, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        for (i = 0; i < conf->num_queues; i++) {
            virtio_del_queue(vdev, i);
        }
        virtio_cleanup(vdev);
        return;
    }

    /*
     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
     * called after ->start_ioeventfd() has already set blk's AioContext.
     */
    s->change =
        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);

    blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);

    blk_iostatus_enable(s->blk);

    add_boot_device_lchs(dev, "/disk@0,0",
                         conf->conf.lcyls,
                         conf->conf.lheads,
                         conf->conf.lsecs);
}
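
/*
 * Note on the coroutine pool sizing above: realize grows the global
 * coroutine pool by num_queues * queue_size / 2 so that bursts of requests
 * do not have to allocate coroutines from scratch, and unrealize below
 * shrinks it by the same amount. With the defaults (one queue of 256
 * entries) this reserves 128 additional pooled coroutines; the divisor is
 * a heuristic, not a hard limit on concurrency.
 */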
static void virtio_blk_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    unsigned i;

    blk_drain(s->blk);
    del_boot_device_lchs(dev, "/disk@0,0");
    virtio_blk_vq_aio_context_cleanup(s);
    for (i = 0; i < conf->num_queues; i++) {
        virtio_del_queue(vdev, i);
    }
    qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
    qemu_mutex_destroy(&s->rq_lock);
    blk_ram_registrar_destroy(&s->blk_ram_registrar);
    qemu_del_vm_change_state_handler(s->change);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}

static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj));
}

static const VMStateDescription vmstate_virtio_blk = {
    .name = "virtio-blk",
    .minimum_version_id = 2,
    .version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_blk_properties[] = {
    DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
    DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_CONFIG_WCE, true),
#ifdef __linux__
    DEFINE_PROP_BIT64("scsi", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_SCSI, false),
#endif
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
                       VIRTIO_BLK_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
    DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
    DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
                     IOThread *),
    DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
                                         conf.iothread_vq_mapping_list),
    DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_DISCARD, true),
    DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
                     conf.report_discard_granularity, true),
    DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_WRITE_ZEROES, true),
    DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
                       conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
                       conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
                     conf.x_enable_wce_if_config_wce, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_blk_properties);
    dc->vmsd = &vmstate_virtio_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
    vdc->save = virtio_blk_save_device;
    vdc->load = virtio_blk_load_device;
    vdc->start_ioeventfd = virtio_blk_start_ioeventfd;
    vdc->stop_ioeventfd = virtio_blk_stop_ioeventfd;
}

static const TypeInfo virtio_blk_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_blk_info);
}

type_init(virtio_register_types)