/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/block/block.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/virtio/virtio-blk.h"
#include "dataplane/virtio-blk.h"
#include "block/scsi.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"

static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    if (req) {
        g_free(req);
    }
}

static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (s->dataplane_started && !s->dataplane_disabled) {
        virtio_blk_data_plane_notify(s->dataplane, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}

static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read)
{
    BlockErrorAction action = blk_get_error_action(req->dev->blk,
                                                   is_read, error);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        block_acct_failed(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}
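
/* Completion callback for blk_aio_flush() in virtio_blk_flush_complete()
 * below.  Errors go through the same rerror/werror policy as reads and
 * writes; otherwise the flush is completed and its accounting finished. */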

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
    virtio_blk_free_request(req);
}

#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;

static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    g_free(ioctl_req);
}

#endif

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}
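
/*
 * Layout of a SCSI passthrough request as handled by
 * virtio_blk_handle_scsi_req() below -- a sketch derived from the
 * in_num/out_num arithmetic used there:
 *
 *   out_sg[0]                virtio_blk_outhdr
 *   out_sg[1]                SCSI command block (CDB)
 *   out_sg[2 .. out_num-1]   write payload, if any
 *   in_sg[0 .. in_num-4]     read payload, if any
 *   in_sg[in_num-3]          sense buffer
 *   in_sg[in_num-2]          virtio_scsi_inhdr
 *   in_sg[in_num-1]          virtio_blk_inhdr (status byte)
 */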

static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    VirtQueueElement *elem = &req->elem;
    VirtIOBlock *blk = req->dev;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!blk->conf.scsi) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is a write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}

static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}
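
/* Submit mrb->reqs[start .. start + num_reqs - 1] to the backend as one
 * I/O.  For a single request the external qiov is used directly; for a
 * merged run a fresh qiov is built (and later destroyed in
 * virtio_blk_rw_complete()) and the requests are chained via mr_next so
 * the completion callback can walk and complete all of them. */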

static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from an external iovec, so
         * we can't modify it here.  We need to initialize it locally and
         * then add the external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
                        virtio_blk_rw_complete, mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
                       virtio_blk_rw_complete, mrb->reqs[start]);
    }
}

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(blk, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in the following situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(blk, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(blk, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}
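
/* VIRTIO_BLK_T_FLUSH: submit any writes still batched in mrb first so the
 * flush covers them, then issue the backend flush. */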

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    block_acct_start(blk_get_stats(req->dev->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(req->dev->blk, mrb);
    }
    blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
}

static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}
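
/* Parse and dispatch one request popped from a virtqueue.  The out iovecs
 * must start with a virtio_blk_outhdr (type/ioprio/sector) and the in
 * iovecs must end with a one-byte virtio_blk_inhdr status; reads and
 * writes are queued in mrb for possible merging.  Returns 0 on success,
 * -1 if the device has been marked broken. */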

static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front(&iov, &out_num, sizeof(req->out));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    type = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if
     * not negotiated, we ignored it in the past.  So keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(VIRTIO_DEVICE(req->dev),
                                       &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, iov, out_num);
            trace_virtio_blk_handle_write(req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(req->dev, req->sector_num,
                                      req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(req->dev->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return 0;
        }

        block_acct_start(blk_get_stats(req->dev->blk),
                         &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !req->dev->conf.request_merging)) {
            virtio_blk_submit_multireq(req->dev->blk, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        VirtIOBlock *s = req->dev;

        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}

void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};

    blk_io_plug(s->blk);

    do {
        /* Disable further guest notifications while we drain the queue;
         * the outer loop re-checks for requests the guest added in the
         * meantime without notifying us. */
        virtio_queue_set_notification(vq, 0);

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        virtio_queue_set_notification(vq, 1);
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }

    blk_io_unplug(s->blk);
}

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (s->dataplane) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * dataplane here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_disabled) {
            return;
        }
    }
    virtio_blk_handle_vq(s, vq);
}
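
/* Bottom half scheduled by virtio_blk_dma_restart_cb() when the VM resumes:
 * requests that were parked on s->rq by BLOCK_ERROR_ACTION_STOP handling
 * are re-parsed and resubmitted. */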

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {};

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
                           virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    AioContext *ctx;
    VirtIOBlockReq *req;

    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself
     * can produce them. */
    while (s->rq) {
        req = s->rq;
        s->rq = req->next;
        virtqueue_detach_element(req->vq, &req->elem, 0);
        virtio_blk_free_request(req);
    }

    aio_context_release(ctx);

    assert(!s->dataplane_started);
    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* coalesce internal state, copy to pci i/o region 0
 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    /* seg_max is the queue size minus the two descriptors needed for the
     * request header and status byte */
    virtio_stl_p(vdev, &blkcfg.seg_max, 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stw_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is OK for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    if (blk_getlength(s->blk) / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}
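
/* Handle a guest write to the config space.  The only field honoured here
 * is wce (writeback cache enable); the rest of virtio_blk_config is
 * effectively read-only for the guest. */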

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, sizeof(blkcfg));

    aio_context_acquire(blk_get_aio_context(s->blk));
    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
    aio_context_release(blk_get_aio_context(s->blk));
}

static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (s->conf.scsi) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (s->conf.config_wce) {
        virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
    }
    if (blk_enable_write_cache(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (blk_is_read_only(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}

static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->dataplane_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        aio_context_acquire(blk_get_aio_context(s->blk));
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
        aio_context_release(blk_get_aio_context(s->blk));
    }
}

static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req = s->rq;

    while (req) {
        qemu_put_sbyte(f, 1);

        if (s->conf.num_queues > 1) {
            qemu_put_be32(f, virtio_get_queue_index(req->vq));
        }

        qemu_put_virtqueue_element(f, &req->elem);
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    virtio_notify_config(vdev);
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
};

static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }

    blkconf_serial(&conf->conf, &conf->serial);
    blkconf_apply_backend_options(&conf->conf);
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    blkconf_blocksizes(&conf->conf);

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, 128, virtio_blk_handle_output);
    }
    virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        virtio_cleanup(vdev);
        return;
    }

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);
    blk_set_guest_block_size(s->blk, s->conf.conf.logical_block_size);

    blk_iostatus_enable(s->blk);
}

static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);

    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
    qemu_del_vm_change_state_handler(s->change);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}

static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    object_property_add_link(obj, "iothread", TYPE_IOTHREAD,
                             (Object **)&s->conf.iothread,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj), NULL);
}

static const VMStateDescription vmstate_virtio_blk = {
    .name = "virtio-blk",
    .minimum_version_id = 2,
    .version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_blk_properties[] = {
    DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
    DEFINE_PROP_BIT("config-wce", VirtIOBlock, conf.config_wce, 0, true),
#ifdef __linux__
    DEFINE_PROP_BIT("scsi", VirtIOBlock, conf.scsi, 0, false),
#endif
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_blk_properties;
    dc->vmsd = &vmstate_virtio_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
    vdc->save = virtio_blk_save_device;
    vdc->load = virtio_blk_load_device;
    vdc->start_ioeventfd = virtio_blk_data_plane_start;
    vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}

static const TypeInfo virtio_blk_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_blk_info);
}

type_init(virtio_register_types)